/*
 * bsg.c - block layer implementation of the sg v3 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
/*
 * TODO
 *	- Should this get merged, block/scsi_ioctl.c will be migrated into
 *	  this file. To keep maintenance down, it's easier to have them
 *	  separated right now.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
static char bsg_version[] = "block layer sg (bsg) 0.4";
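/*
 * Illustrative userspace sketch (not part of this driver): issuing a
 * single command through the sg v4 header this file implements, via the
 * synchronous SG_IO path handled in bsg_ioctl() below. The device path,
 * helper name and CDB are assumptions for illustration only.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>			/* SG_IO */
#include <linux/bsg.h>			/* struct sg_io_v4 */

static int bsg_test_unit_ready(const char *dev)
{
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY, no data */
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd = open(dev, O_RDWR);	/* e.g. a node for this driver */

	if (fd < 0)
		return -1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';		/* v4 headers are marked 'Q' */
	hdr.request = (unsigned long) cdb;
	hdr.request_len = sizeof(cdb);
	hdr.response = (unsigned long) sense;
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 10000;		/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0) {
		close(fd);
		return -1;
	}

	close(fd);
	return hdr.device_status;	/* 0 means the unit is ready */
}
#endif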
struct bsg_device {
	request_queue_t *queue;
	struct gendisk *disk;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int minor, queued_cmds, done_cmds, max_queue;
	unsigned long flags;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BDEVNAME_SIZE];
};

#define BSG_F_BLOCK		1
#define BSG_F_WRITE_PERM	2
#define BSG_DEFAULT_CMDS	64

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif
#define list_entry_bc(entry)	list_entry((entry), struct bsg_command, list)

#define BSG_MAJOR	(240)
static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr;
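/*
 * open devices are kept on a small hash of lists, bucketed by the low
 * bits of the minor number; BSG_LIST_SIZE must stay a power of two for
 * the mask in bsg_list_idx() to work.
 */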
#define BSG_LIST_SIZE	(8)
#define bsg_list_idx(minor)	((minor) & (BSG_LIST_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);

static struct kmem_cache *bsg_cmd_cachep;
/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct sg_io_v4 hdr;
	struct sg_io_v4 __user *uhdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};
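/*
 * bsg_free_command - return a command to the slab cache, drop it from the
 * device's queued count and wake anyone waiting for a free slot.
 */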
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}
static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		goto out;
	}

	memset(bc, 0, sizeof(*bc));
	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}
static inline void
bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds--;
	list_del(&bc->list);
}

static inline void
bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds++;
	list_add_tail(&bc->list, &bd->done_list);
	wake_up(&bd->wq_done);
}
static inline int bsg_io_schedule(struct bsg_device *bd, int state)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, state);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}
/*
 * get a new free command, blocking if needed and specified
 */
static struct bsg_command *bsg_get_command(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = __bsg_alloc_command(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, TASK_INTERRUPTIBLE);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}
	} while (1);

	return bc;
}
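/*
 * copy the CDB from userspace into the request, verify that the caller is
 * allowed to issue it, and pick a timeout: the header's value first, then
 * the queue's sg default, then the global sg default.
 */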
static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
{
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (blk_verify_command(rq->cmd, has_write_perm))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}
/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
{
	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->request_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	/* not supported currently */
	if (hdr->protocol || hdr->subprotocol)
		return -EINVAL;

	/*
	 * looks sane, if no data then it should be fine from our POV
	 */
	if (!hdr->dout_xfer_len && !hdr->din_xfer_len)
		return 0;

	/* not supported currently */
	if (hdr->dout_xfer_len && hdr->din_xfer_len)
		return -EINVAL;

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return 0;
}
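/*
 * To summarize the checks above: a header is accepted when guard is 'Q',
 * the CDB fits in BLK_MAX_CDB, any transfer fits within the queue limits,
 * protocol/subprotocol are zero, and at most one data direction is set;
 * bidirectional transfers are rejected until they are supported.
 */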
/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	request_queue_t *q = bd->queue;
	struct request *rq;
	int ret, rw = 0; /* shut up gcc */
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
							&bd->flags));
	if (ret) {
		blk_put_request(rq);
		return ERR_PTR(ret);
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret) {
			dprintk("failed map at %d\n", ret);
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_del(&bc->list);
	bsg_add_done_cmd(bd, bc);
	spin_unlock_irqrestore(&bd->lock, flags);
}
/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, bd->disk, rq, 1, bsg_rq_end_io);
}
static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_entry_bc(bd->done_list.next);
		bsg_del_done_cmd(bd, bc);
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}
/*
 * Get a finished command from the done list
 */
static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, state);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);
	return bc;
}
static struct bsg_command *
bsg_get_done_cmd(struct bsg_device *bd, const struct iovec *iov)
{
	return __bsg_get_done_cmd(bd, TASK_INTERRUPTIBLE);
}

static struct bsg_command *
bsg_get_done_cmd_nosignals(struct bsg_device *bd)
{
	return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE);
}
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);

	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->din_resid = rq->data_len;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min((unsigned int) hdr->max_response_len,
			      rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	blk_rq_unmap_user(bio);
	blk_put_request(rq);

	return ret;
}
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it. The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		bc = bsg_get_done_cmd_nosignals(bd);

		/*
		 * we _must_ complete before restarting, because
		 * bsg_release can't handle this failing.
		 */
		if (PTR_ERR(bc) == -ERESTARTSYS)
			continue;
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}
typedef struct bsg_command *(*bsg_command_callback)(struct bsg_device *bd, const struct iovec *iov);
static ssize_t
__bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
	   struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = get_bc(bd, iov);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);

		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}
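/*
 * Illustrative userspace sketch: reaping replies from the fd. Reads must
 * be a multiple of sizeof(struct sg_io_v4); each completed command comes
 * back as one whole header. The helper name is hypothetical.
 */
#if 0
#include <unistd.h>
#include <linux/bsg.h>

static int bsg_reap_one(int fd, struct sg_io_v4 *hdr)
{
	ssize_t ret = read(fd, hdr, sizeof(*hdr));

	if (ret != sizeof(*hdr))
		return -1;		/* error or nothing completed */

	return hdr->device_status;
}
#endif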
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_read;
	int ret;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bsg_get_done_cmd,
			 bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}
static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
			   size_t count, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		request_queue_t *q = bd->queue;

		bc = bsg_get_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		bc->uhdr = (struct sg_io_v4 __user *) buf;
		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_read;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_read = 0;
	ret = __bsg_write(bd, buf, count, &bytes_read);
	*ppos = bytes_read;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_read);
	return bytes_read;
}
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}
static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0;

	mutex_lock(&bsg_mutex);

	if (!atomic_dec_and_test(&bd->ref_count))
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	kfree(bd);
out:
	mutex_unlock(&bsg_mutex);
	return ret;
}
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct gendisk *disk,
					 struct file *file)
{
	struct bsg_device *bd;
	unsigned char buf[32];

	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);

	bd->disk = disk;
	bd->queue = disk->queue;
	kobject_get(&disk->queue->kobj);
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);

	strncpy(bd->name, disk->disk_name, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}
static struct bsg_device *__bsg_get_device(int minor)
{
	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each(entry, list) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
			break;
		}

		bd = NULL;
	}

	mutex_unlock(&bsg_mutex);
	return bd;
}
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = __bsg_get_device(iminor(inode));
	struct bsg_class_device *bcd, *__bcd;

	if (bd)
		return bd;

	/*
	 * find the class device
	 */
	bcd = NULL;
	mutex_lock(&bsg_mutex);
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == iminor(inode)) {
			bcd = __bcd;
			break;
		}
	}
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	return bsg_add_device(inode, bcd->disk, file);
}
static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}
static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
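/*
 * Illustrative userspace sketch: with O_NONBLOCK, poll(2) can be used to
 * wait for a completed command (POLLIN) or for room in the command queue
 * (POLLOUT), matching the mask computed above. The helper name is
 * hypothetical.
 */
#if 0
#include <poll.h>

static int bsg_wait_for_reply(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	return poll(&pfd, 1, timeout_ms);	/* > 0: reply readable */
}
#endif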
static int
bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	  unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;

	if (!bd)
		return -ENXIO;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, bd->disk, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		blk_execute_rq(bd->queue, bd->disk, rq, 0);
		blk_complete_sgv4_hdr_rq(rq, &hdr, bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return 0;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
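/*
 * Illustrative userspace sketch: the SG_GET_COMMAND_Q/SG_SET_COMMAND_Q
 * pair handled above adjusts the per-device queue depth. The helper name
 * and the doubling policy are assumptions.
 */
#if 0
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int bsg_double_queue_depth(int fd)
{
	int depth;

	if (ioctl(fd, SG_GET_COMMAND_Q, &depth) < 0)
		return -1;

	depth *= 2;
	return ioctl(fd, SG_SET_COMMAND_Q, &depth);
}
#endif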
static struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.ioctl		=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};
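/*
 * bsg_register_disk()/bsg_unregister_disk() below tie a gendisk to a bsg
 * class device; they are expected to be called from the block layer as
 * disks are added to and removed from the system.
 */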
void bsg_unregister_disk(struct gendisk *disk)
{
	struct bsg_class_device *bcd = &disk->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	sysfs_remove_link(&bcd->disk->queue->kobj, "bsg");
	class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	bcd->class_dev = NULL;
	list_del_init(&bcd->list);
	mutex_unlock(&bsg_mutex);
}
int bsg_register_disk(struct gendisk *disk)
{
	request_queue_t *q = disk->queue;
	struct bsg_class_device *bcd;
	dev_t dev;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &disk->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));
	INIT_LIST_HEAD(&bcd->list);

	mutex_lock(&bsg_mutex);
	dev = MKDEV(BSG_MAJOR, bsg_device_nr);
	bcd->minor = bsg_device_nr++;
	bcd->disk = disk;
	bcd->class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev,
					     "%s", disk->disk_name);
	if (IS_ERR(bcd->class_dev))
		goto err;

	list_add_tail(&bcd->list, &bsg_class_list);
	if (sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg"))
		goto err;

	mutex_unlock(&bsg_mutex);
	return 0;
err:
	if (!IS_ERR(bcd->class_dev))
		class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	mutex_unlock(&bsg_mutex);
	return -ENOMEM;
}
static int __init bsg_init(void)
{
	int ret, i;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		kmem_cache_destroy(bsg_cmd_cachep);
		return PTR_ERR(bsg_class);
	}

	ret = register_chrdev(BSG_MAJOR, "bsg", &bsg_fops);
	if (ret) {
		kmem_cache_destroy(bsg_cmd_cachep);
		class_destroy(bsg_class);
		return ret;
	}

	printk(KERN_INFO "%s loaded\n", bsg_version);
	return 0;
}
MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION("Block layer SCSI generic (sg) driver");
MODULE_LICENSE("GPL");

subsys_initcall(bsg_init);
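/*
 * Note: registered queues appear as class devices under /sys/class/bsg,
 * named after the disk, from which udev (or a manual mknod against the
 * BSG_MAJOR above) can create the character device nodes.
 */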