/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License. See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * such as drivers/scsi/sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom ->submit_bio function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
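/*
 * Worked example (illustrative, not from the original source): with
 * 32-frame fixed packets, one packet holds 32 * 2kB = 64kB and starts on
 * a 64kB-aligned zone of the media. A 4kB write into the middle of a zone
 * cannot be sent to the drive as-is; the worker thread first reads the
 * remaining 60kB of that zone ("read gathering"), assembles the full
 * packet, and issues it as a single aligned 64kB write.
 */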
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>
#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...) \
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...) \
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...) \
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_dbg(level, pd, fmt, ...) \
do { \
	if (level == 2 && PACKET_DEBUG >= 2) \
		pr_notice("%s: %s():" fmt, \
			  pd->name, __func__, ##__VA_ARGS__); \
	else if (level == 1 && PACKET_DEBUG >= 1) \
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
} while (0)
#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t psd_pool;
static struct bio_set pkt_bio_set;

static struct class	*class_pktcdvd = NULL;	  /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
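/*
 * Worked example for get_zone() (a sketch, not part of the driver): with
 * pd->settings.size == 64 sectors (a 32-frame packet) and pd->offset == 0,
 * get_zone(1000, pd) == 1000 & ~63 == 960, so sector 1000 belongs to the
 * packet starting at sector 960. The hypothetical helper below applies the
 * same arithmetic to test whether a sector begins a packet.
 */
static inline bool pkt_starts_zone(sector_t sector, struct pktcdvd_device *pd)
{
	/* assumes pd->offset == 0 for simplicity */
	return get_zone(sector, pd) == sector;
}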
/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * /sys/class/pktcdvd/pktcdvd[0-7]/
 *     stat/reset
 *     stat/packets_started
 *     stat/packets_finished
 *     stat/kb_written
 *     stat/kb_read
 *     stat/kb_read_gather
 *     write_queue/size
 *     write_queue/congestion_off
 *     write_queue/congestion_on
 **********************************************************/
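/*
 * Example userspace usage (illustrative; assumes writer 0 is mapped):
 *
 *   cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *   echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 */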
static ssize_t packets_started_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
}
static DEVICE_ATTR_RO(packets_started);

static ssize_t packets_finished_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
}
static DEVICE_ATTR_RO(packets_finished);

static ssize_t kb_written_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
}
static DEVICE_ATTR_RO(kb_written);

static ssize_t kb_read_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
}
static DEVICE_ATTR_RO(kb_read);

static ssize_t kb_read_gather_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
}
static DEVICE_ATTR_RO(kb_read_gather);
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	if (len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;
	}
	return len;
}
static DEVICE_ATTR_WO(reset);
static struct attribute *pkt_stat_attrs[] = {
	&dev_attr_packets_finished.attr,
	&dev_attr_packets_started.attr,
	&dev_attr_kb_read.attr,
	&dev_attr_kb_written.attr,
	&dev_attr_kb_read_gather.attr,
	&dev_attr_reset.attr,
	NULL,
};

static const struct attribute_group pkt_stat_group = {
	.name = "stat",
	.attrs = pkt_stat_attrs,
};
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
	spin_unlock(&pd->lock);
	return n;
}
static DEVICE_ATTR_RO(size);
static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
static ssize_t congestion_off_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
	spin_unlock(&pd->lock);
	return n;
}

static ssize_t congestion_off_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					    &pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
static DEVICE_ATTR_RW(congestion_off);
static ssize_t congestion_on_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
	spin_unlock(&pd->lock);
	return n;
}

static ssize_t congestion_on_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					    &pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
static DEVICE_ATTR_RW(congestion_on);
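/*
 * Example (illustrative): tune the throttling thresholds so that writers
 * block once 2000 bios are queued and are released again at 1500:
 *
 *   echo 2000 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_on
 *   echo 1500 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_off
 */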
static struct attribute *pkt_wq_attrs[] = {
	&dev_attr_congestion_on.attr,
	&dev_attr_congestion_off.attr,
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group pkt_wq_group = {
	.name = "write_queue",
	.attrs = pkt_wq_attrs,
};

static const struct attribute_group *pkt_groups[] = {
	&pkt_stat_group,
	&pkt_wq_group,
	NULL,
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create_with_groups(class_pktcdvd, NULL,
						    MKDEV(0, 0), pd, pkt_groups,
						    "%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	if (class_pktcdvd)
		device_unregister(pd->dev);
}
/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/
static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
			       char *data)
{
	int n = 0;
	int idx;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static CLASS_ATTR_RO(device_map);
static ssize_t add_store(struct class *c, struct class_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}
static CLASS_ATTR_WO(add);
static ssize_t remove_store(struct class *c, struct class_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
static CLASS_ATTR_WO(remove);
static struct attribute *class_pktcdvd_attrs[] = {
	&class_attr_add.attr,
	&class_attr_remove.attr,
	&class_attr_device_map.attr,
	NULL,
};
ATTRIBUTE_GROUPS(class_pktcdvd);
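/*
 * Example (illustrative): map the CD drive at 11:0 (sr0), list the
 * mappings, then tear one down again using the pktcdvd device's own
 * major:minor as reported by device_map:
 *
 *   echo "11:0" > /sys/class/pktcdvd/add
 *   cat /sys/class/pktcdvd/device_map
 *   echo "<pkt_major>:<pkt_minor>" > /sys/class/pktcdvd/remove
 */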
static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_groups = class_pktcdvd_groups;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}
/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", 0444,
					     pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}
/* ----------------------------------------------------------*/
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
		if (!pkt->r_bios[i])
			goto no_rd_bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++)
		kfree(pkt->r_bios[i]);
no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	kfree(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}
/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++)
		kfree(pkt->r_bios[i]);
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	kfree(pkt->w_bio);
	kfree(pkt);
}
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}
static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);

	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, &pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct scsi_cmnd *scmd;
	struct request *rq;
	int ret = 0;

	rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
				REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scmd = blk_mq_rq_to_pdu(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      GFP_NOIO);
		if (ret)
			goto out;
	}

	scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->rq_flags |= RQF_QUIET;

	blk_execute_rq(rq, false);
	if (scmd->result)
		ret = -EIO;
out:
	blk_mq_free_request(rq);
	return ret;
}
static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}
/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct scsi_sense_hdr *sshdr = cgc->sshdr;

	if (sshdr)
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sshdr->sense_key, sshdr->asc, sshdr->ascq,
			sense_key_string(sshdr->sense_key));
	else
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
	return pkt_generic_packet(pd, &cgc);
}
/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);

	return ret;
}
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
				    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		submit_bio_noacct(bio);
	}
}
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
static void pkt_end_io_read(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);

	if (bio->bi_status)
		atomic_inc(&pkt->io_errors);
	bio_uninit(bio);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}
static void pkt_end_io_packet_write(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);

	pd->stats.pkt_ended++;

	bio_uninit(bio);
	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}
static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
			get_zone(bio->bi_iter.bi_sector, pd));
		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	 * below, wake up any waiters
	 */
	if (pd->congested &&
	    pd->bio_queue_size <= pd->write_congestion_off) {
		pd->congested = false;
		wake_up_var(&pd->congested);
	}
	spin_unlock(&pd->lock);

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
static void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;

	bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
		 REQ_OP_WRITE);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	for (f = 0; f < pkt->frames; f++) {
		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;

		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
		pkt->cache_valid = 1;
	else
		pkt->cache_valid = 0;

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
	struct bio *bio;

	if (status)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (!pkt->w_bio->bi_status) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			pkt_dbg(2, pd, "No recovery possible\n");
			pkt_set_state(pkt, PACKET_FINISHED_STATE);
			break;

		case PACKET_FINISHED_STATE:
			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
			return;

		default:
			BUG();
			break;
		}
	}
}
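/*
 * State transitions, for reference (derived from the switch above):
 *
 *   WAITING -> READ_WAIT -> WRITE_WAIT -> FINISHED
 *                  |             |            ^
 *                  v             v            |
 *               RECOVERY <-------+------------+  (RECOVERY always
 *                  |                              falls through to
 *                  +----------------------------> FINISHED)
 */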
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
}
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	ret = pkt_get_disc_info(pd, &di);
	if (ret)
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	ret = pkt_get_track_info(pd, last_track, 1, &ti);
	if (ret)
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		ret = pkt_get_track_info(pd, last_track, 1, &ti);
		if (ret)
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}
/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	ret = pkt_get_disc_info(pd, &di);
	if (ret) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	ret = pkt_get_track_info(pd, track, 1, &ti);
	if (ret) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
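	/*
	 * Worked example (illustrative): a drive reporting 32-frame fixed
	 * packets (ti.fixed_packet_size == 32) yields settings.size ==
	 * 32 << 2 == 128 sectors, i.e. 64kB per packet.
	 */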
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}
/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
	if (ret)
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_err(pd, "write caching control failed\n");
		pkt_dump_sense(pd, &cgc);
	} else if (!ret && set)
		pkt_notice(pd, "enabled write caching\n");
	return ret;
}
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sshdr = &sshdr;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(pd, &cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}
/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		pkt_notice(pd, "disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			pkt_notice(pd, "unknown disc sub-type %d\n", st);
			return 1;
	}
	if (*speed > 0) {
		pkt_info(pd, "maximum media speed: %d\n", *speed);
		return 0;
	} else {
		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	pkt_dbg(2, pd, "Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);
	return ret;
}
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	ret = pkt_probe_settings(pd);
	if (ret) {
		pkt_dbg(2, pd, "failed probe\n");
		return ret;
	}

	ret = pkt_set_write_settings(pd);
	if (ret) {
		pkt_dbg(1, pd, "failed saving write settings\n");
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	ret = pkt_get_max_speed(pd, &write_speed);
	if (ret)
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
			break;
		default:
			ret = pkt_media_speed(pd, &media_write_speed);
			if (ret)
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	ret = pkt_set_speed(pd, write_speed, read_speed);
	if (ret) {
		pkt_dbg(1, pd, "couldn't set write speed\n");
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	ret = pkt_perform_opc(pd);
	if (ret) {
		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
	}

	return 0;
}
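/*
 * Note on units (informational): GPCMD_SET_SPEED above takes kB/s, where
 * 1x CD speed is about 176.4 kB/s; hence "16 * 177" approximates 16x and
 * the division by 176 converts back to a speed factor for display.
 */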
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;
	struct block_device *bdev;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so open should not fail.
	 */
	bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto out;
	}

	ret = pkt_get_last_written(pd, &lba);
	if (ret) {
		pkt_err(pd, "pkt_get_last_written failed\n");
		goto out_putdev;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		ret = pkt_open_write(pd);
		if (ret)
			goto out_putdev;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		blk_queue_max_hw_sectors(q, pd->settings.size);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	ret = pkt_set_segment_merging(pd, q);
	if (ret)
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			pkt_err(pd, "not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		pkt_info(pd, "%lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		pkt_dbg(1, pd, "not flushing cache\n");

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;

	dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
	return pkt_devs[dev_minor];
}
static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}
static void pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
}
static void pkt_end_io_read_cloned(struct bio *bio)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	psd->bio->bi_status = bio->bi_status;
	bio_put(bio);
	bio_endio(psd->bio);
	mempool_free(psd, &psd_pool);
	pkt_bio_finished(pd);
}
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
	struct bio *cloned_bio =
		bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
	struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);

	psd->pd = pd;
	psd->bio = bio;
	cloned_bio->bi_private = psd;
	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
	pd->stats.secs_r += bio_sectors(bio);
	pkt_queue_bio(pd, cloned_bio);
}
static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	zone = get_zone(bio->bi_iter.bi_sector, pd);

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size +=
					bio->bi_iter.bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		struct wait_bit_queue_entry wqe;

		init_wait_var_entry(&wqe, &pd->congested, 0);
		for (;;) {
			prepare_to_wait_event(__var_waitqueue(&pd->congested),
					      &wqe.wq_entry,
					      TASK_UNINTERRUPTIBLE);
			if (pd->bio_queue_size <= pd->write_congestion_off)
				break;
			pd->congested = true;
			spin_unlock(&pd->lock);
			schedule();
			spin_lock(&pd->lock);
		}
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
}
static void pkt_submit_bio(struct bio *bio)
{
	struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
	struct bio *split;

	bio = bio_split_to_limits(bio);

	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_iter.bi_sector,
		(unsigned long long)bio_end_sector(bio));

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		pkt_make_request_read(pd, bio);
		return;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		pkt_notice(pd, "WRITE for ro device (%llu)\n",
			   (unsigned long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
		pkt_err(pd, "wrong bio size\n");
		goto end_io;
	}

	do {
		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);

		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);

			split = bio_split(bio, last_zone -
					  bio->bi_iter.bi_sector,
					  GFP_NOIO, &pkt_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
	} while (split != bio);

	return;
end_io:
	bio_io_error(bio);
}
static void pkt_init_queue(struct pktcdvd_device *pd)
{
	struct request_queue *q = pd->disk->queue;

	blk_queue_logical_block_size(q, CD_FRAMESIZE);
	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
	q->queuedata = pd;
}
static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}
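/*
 * Note (added for clarity): this seq_file is registered per writer by
 * pkt_new_dev() below, under the "driver/pktcdvd" procfs directory, so
 * the status above is readable from e.g. /proc/driver/pktcdvd/pktcdvd0.
 */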
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	struct block_device *bdev;
	struct scsi_device *sdev;

	if (pd->pkt_dev == dev) {
		pkt_err(pd, "recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			pkt_err(pd, "%pg already setup\n", pd2->bdev);
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			pkt_err(pd, "can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	sdev = scsi_device_from_queue(bdev->bd_disk->queue);
	if (!sdev) {
		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
		return -EINVAL;
	}
	put_device(&sdev->sdev_gendev);

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		pkt_err(pd, "can't start kernel thread\n");
		goto out_mem;
	}

	proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
	pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return -ENOMEM;
}
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		fallthrough;
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		if (!bdev->bd_disk->fops->ioctl)
			ret = -ENOTTY;
		else
			ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
		break;
	default:
		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}
static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
}
static const struct block_device_operations pktcdvd_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= pkt_submit_bio,
	.open		= pkt_open,
	.release	= pkt_close,
	.ioctl		= pkt_ioctl,
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
	.check_events	= pkt_check_events,
	.devnode	= pkt_devnode,
};
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		pr_err("max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
					sizeof(struct pkt_rb_node));
	if (ret)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	ret = -ENOMEM;
	disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->minors = 1;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
	strcpy(disk->disk_name, pd->name);
	disk->private_data = pd;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_mem2;

	/* inherit events of the host device */
	disk->events = pd->bdev->bd_disk->events;

	ret = add_disk(disk);
	if (ret)
		goto out_mem2;

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_mem2:
	put_disk(disk);
out_mem:
	mempool_exit(&pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	pr_err("setup of pktcdvd device failed\n");
	return ret;
}
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		pr_debug("dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}
	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	pkt_dbg(1, pd, "writer unmapped\n");

	del_gendisk(pd->disk);
	put_disk(pd->disk);

	mempool_exit(&pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}
#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
};
static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};
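/*
 * Usage sketch (illustrative, not part of the original source): a
 * privileged tool such as pktsetup(8) drives the control node registered
 * above with PACKET_CTRL_CMD, roughly along the lines of:
 *
 *	struct pkt_ctrl_command c = {
 *		.command = PKT_CTRL_CMD_SETUP,
 *		.dev = enc,	// 32-bit device-number encoding of e.g.
 *				// /dev/sr0, matching new_decode_dev()
 *	};
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	int err = ioctl(fd, PACKET_CTRL_CMD, &c);
 *	// on success, c.pkt_dev identifies the new pktcdvd device
 *
 * The names enc/fd/err are hypothetical; the struct layout comes from
 * <linux/pktcdvd.h>.
 */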
static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (ret)
		return ret;
	ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret) {
		mempool_exit(&psd_pool);
		return ret;
	}

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
	return ret;
}
static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);

	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);