/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
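/*
 * Illustrative sketch (editor's example, not part of the driver): how an
 * unaligned write becomes an aligned packet write, assuming the usual
 * 32-frame packet. Roughly:
 *
 *	zone = get_zone(bio->bi_sector, pd);	// align down to the packet
 *	for each 2kB frame in the zone:
 *		if the frame is not covered by queued writes:
 *			read it from the device (read gathering)
 *	assemble the 32 frames into one aligned write bio
 *	queue the write bio to the packet I/O scheduler
 *
 * The real implementation of these steps lives in pkt_handle_queue(),
 * pkt_gather_data() and pkt_start_write() below.
 */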
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>

#include <asm/uaccess.h>
#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)					\
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...)						\
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s():" fmt,				\
			  pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)
#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
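/*
 * Worked example (illustrative): the masking above only produces zone
 * boundaries because the packet size is a power of two. Assuming the
 * usual 32-frame packet (pd->settings.size == 128 sectors) and
 * pd->offset == 0:
 *
 *	get_zone(0, pd)   == 0
 *	get_zone(127, pd) == 0
 *	get_zone(128, pd) == 128
 *
 * i.e. the low bits of the sector number are simply masked off.
 */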
/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}
/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/
#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/
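/*
 * Example usage from user space (illustrative), assuming a writer has
 * been set up as pktcdvd0:
 *
 *	cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *	echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 *	echo 1000 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_on
 */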
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on",  0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};
static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;
	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}
static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
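/*
 * Example (illustrative): with these clamping rules, writing 1000 to
 * congestion_on while congestion_off is unset (<= 0) yields the pair
 * on=1000/off=900, i.e. the bio work queue is considered congested once
 * 1000 bios are queued and uncongested again when it drains below 900.
 */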
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}
/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/
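/*
 * Example usage from user space (illustrative): map and unmap a CD-RW
 * drive by writing the "major:minor" numbers of the underlying block
 * device, here assumed to be /dev/sr0 = 11:0:
 *
 *	echo "11:0" > /sys/class/pktcdvd/add
 *	cat /sys/class/pktcdvd/device_map
 *	echo "11:0" > /sys/class/pktcdvd/remove
 */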
static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}

static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}

static ssize_t class_pktcdvd_store_remove(struct class *c,
					  struct class_attribute *attr,
					  const char *buf,
					  size_t count)
{
	unsigned int major, minor;
	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}

static struct class_attribute class_pktcdvd_attrs[] = {
	__ATTR(add, 0200, NULL, class_pktcdvd_store_add),
	__ATTR(remove, 0200, NULL, class_pktcdvd_store_remove),
	__ATTR(device_map, 0444, class_pktcdvd_show_map, NULL),
	__ATTR_NULL
};
static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}
/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/
static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
					     pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}
/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;
		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
}
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     WRITE : READ, __GFP_WAIT);
	if (!rq)
		return -ENODEV;

	if (cgc->buflen) {
		if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}
static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}
/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct request_sense *sense = cgc->sense;

	if (sense)
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sense->sense_key, sense->asc, sense->ascq,
			sense_key_string(sense->sense_key));
	else
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}
/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(pd, &cgc);

	return ret;
}
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}

		if (bio_data_dir(bio) == READ) {
			if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
				if (pd->read_speed == pd->write_speed) {
					pd->read_speed = MAX_SPEED;
					pkt_set_speed(pd, pd->write_speed, pd->read_speed);
				}
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
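/*
 * Worked example (illustrative, assuming 4kB pages): with the usual
 * 32-frame (64kB) packet, the write bio needs 32 segments when every
 * 2kB frame lives in its own segment, but only 16 when the frames are
 * copied into pkt->pages[] (two CD_FRAMESIZE frames per page).
 * PACKET_MERGE_SEGS selects the second, copy-based strategy when the
 * queue cannot take 32 segments.
 */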
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}
static void pkt_end_io_read(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}
static void pkt_end_io_packet_write(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
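		/*
		 * Illustrative: with CD_FRAMESIZE == 2048 and 4kB pages,
		 * two frames share each page, so frame f lands in page
		 * f/2 at offset 0 or 2048 (cf. FRAMES_PER_PAGE).
		 */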
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	bio_reset(pkt->bio);
	pkt->bio->bi_bdev = pd->bdev;
	pkt->bio->bi_rw = REQ_WRITE;
	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
	pkt->bio->bi_vcnt = pkt->frames;

	pkt->bio->bi_end_io = pkt_end_io_packet_write;
	pkt->bio->bi_private = pkt;

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	struct pktcdvd_device *pd = pkt->pd;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n",
			(unsigned long long)get_zone(bio->bi_sector, pd));
		if (get_zone(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
{
	struct bio *bio;

	if (!uptodate)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios)))
		bio_endio(bio, uptodate ? 0 : -EIO);
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int uptodate;

	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				pkt_dbg(2, pd, "No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;

		default:
			BUG();
			break;
		}
	}
}
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another packet.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
}
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
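	/*
	 * Illustrative layout of the MODE SENSE reply: a mode parameter
	 * header, then pd->mode_offset bytes of block descriptors, then
	 * the write parameters page itself, which is what wp points at:
	 *
	 *	buffer: [mode_page_header][block descriptors][write params]
	 *	                                              ^-- wp
	 */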
	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}
/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			/* The disc is always writable */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}

	/*
	 * For disc type 0xff we should probably reserve a new track.
	 * But I'm not sure; should we leave this to user apps? Probably.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * We keep the packet size in 512-byte units; it makes request
	 * calculations easier.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
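	/*
	 * Worked example (illustrative): track_start is reported in 2kB
	 * blocks, so it is shifted left by 2 to get 512-byte sectors. A
	 * track starting at block 150 gives sector 600, and with a
	 * 128-sector packet pd->offset = 600 & 127 = 88, i.e. the
	 * misalignment of the track start relative to the packet size.
	 */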
	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}
/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_err(pd, "write caching control failed\n");
		pkt_dump_sense(pd, &cgc);
	} else if (!ret && set)
		pkt_notice(pd, "enabled write caching\n");
	return ret;
}
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(pd, &cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}
/* These tables are from cdrecord - I don't have the orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		pkt_notice(pd, "disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			pkt_notice(pd, "unknown disc sub-type %d\n", st);
			return 1;
	}
	if (*speed > 0) {
		pkt_info(pd, "maximum media speed: %d\n", *speed);
		return 0;
	} else {
		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	pkt_dbg(2, pd, "Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(pd, &cgc);
	return ret;
}
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		pkt_dbg(2, pd, "failed probe\n");
		return ret;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		pkt_dbg(1, pd, "failed saving write settings\n");
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
			break;
		default:
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		pkt_dbg(1, pd, "couldn't set write speed\n");
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	if ((ret = pkt_perform_opc(pd))) {
		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
	}

	return 0;
}
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
		goto out;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		pkt_err(pd, "pkt_get_last_written failed\n");
		goto out_putdev;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_putdev;
		/*
		 * Some CDRW drives cannot handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_hw_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			pkt_err(pd, "not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		pkt_info(pd, "%lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		pkt_dbg(1, pd, "not flushing cache\n");

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;
	return pkt_devs[dev_minor];
}
static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}
static void pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
}
static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	bio_put(bio);
	bio_endio(psd->bio, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
}
static void pkt_make_request(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd;
	char b[BDEVNAME_SIZE];
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	pd = q->queuedata;
	if (!pd) {
		pr_err("%s incorrect request queue\n",
		       bdevname(bio->bi_bdev, b));
		goto end_io;
	}

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);

		psd->pd = pd;
		psd->bio = bio;
		cloned_bio->bi_bdev = pd->bdev;
		cloned_bio->bi_private = psd;
		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
		pd->stats.secs_r += bio_sectors(bio);
		pkt_queue_bio(pd, cloned_bio);
		return;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		pkt_notice(pd, "WRITE for ro device (%llu)\n",
			   (unsigned long long)bio->bi_sector);
		goto end_io;
	}

	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
		pkt_err(pd, "wrong bio size\n");
		goto end_io;
	}

	blk_queue_bounce(q, &bio);

	zone = get_zone(bio->bi_sector, pd);
	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_sector,
		(unsigned long long)bio_end_sector(bio));

	/* Check if we have to split the bio */
	{
		struct bio_pair *bp;
		sector_t last_zone;
		int first_sectors;

		last_zone = get_zone(bio_end_sector(bio) - 1, pd);
		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);
			first_sectors = last_zone - bio->bi_sector;
			bp = bio_split(bio, first_sectors);
			BUG_ON(!bp);
			pkt_make_request(q, &bp->bio1);
			pkt_make_request(q, &bp->bio2);
			bio_pair_release(bp);
			return;
		}
	}
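	/*
	 * Worked example (illustrative, assuming 128-sector zones and
	 * pd->offset == 0): a 16-sector write starting at sector 120
	 * covers sectors 120..135 and straddles the zone boundary at
	 * 128; first_sectors = 128 - 120 = 8, so the bio is split into
	 * 120..127 and 128..135 and each half is fed back through
	 * pkt_make_request().
	 */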
2409 * If we find a matching packet in state WAITING or READ_WAIT, we can
2410 * just append this bio to that packet.
2412 spin_lock(&pd->cdrw.active_list_lock);
2414 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2415 if (pkt->sector == zone) {
2416 spin_lock(&pkt->lock);
2417 if ((pkt->state == PACKET_WAITING_STATE) ||
2418 (pkt->state == PACKET_READ_WAIT_STATE)) {
2419 bio_list_add(&pkt->orig_bios, bio);
2420 pkt->write_size += bio->bi_size / CD_FRAMESIZE;
2421 if ((pkt->write_size >= pkt->frames) &&
2422 (pkt->state == PACKET_WAITING_STATE)) {
2423 atomic_inc(&pkt->run_sm);
2424 wake_up(&pd->wqueue);
2426 spin_unlock(&pkt->lock);
2427 spin_unlock(&pd->cdrw.active_list_lock);
2432 spin_unlock(&pkt->lock);
2435 spin_unlock(&pd->cdrw.active_list_lock);
	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
		do {
			spin_unlock(&pd->lock);
			congestion_wait(BLK_RW_ASYNC, HZ);
			spin_lock(&pd->lock);
		} while (pd->bio_queue_size > pd->write_congestion_off);
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
	return;
end_io:
	bio_io_error(bio);
}
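
/*
 * merge_bvec callback: limit each bio to the current packet so most
 * bios never have to be split.  Worked example (illustrative numbers
 * only): with 32kB fixed packets (pd->settings.size == 64 sectors), a
 * bio starting 16 sectors into its zone with 4096 bytes already queued
 * has used = (16 << 9) + 4096 = 12288 bytes, leaving
 * (64 << 9) - 12288 = 20480 bytes before the packet boundary.
 */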
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone = get_zone(bmd->bi_sector, pd);
	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
	int remaining = (pd->settings.size << 9) - used;
	int remaining2;

	/*
	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
	 * boundary, pkt_make_request() will split the bio.
	 */
	remaining2 = PAGE_SIZE - bmd->bi_size;
	remaining = max(remaining, remaining2);

	BUG_ON(remaining < 0);
	return remaining;
}
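
/*
 * Plug the packet layer into the block core: install the custom
 * make_request_fn and advertise CD_FRAMESIZE (2048-byte) logical
 * blocks to the layers above.
 */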
static void pkt_init_queue(struct pktcdvd_device *pd)
{
	struct request_queue *q = pd->disk->queue;

	blk_queue_make_request(q, pkt_make_request);
	blk_queue_logical_block_size(q, CD_FRAMESIZE);
	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
	blk_queue_merge_bvec(q, pkt_merge_bvec);
	q->queuedata = pd;
}
static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}
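
/*
 * pkt_seq_show() backs /proc/driver/pktcdvd/<name>.  A hypothetical
 * excerpt of the output (actual values depend on the disc and device):
 *
 *	Writer pktcdvd0 mapped to sr0:
 *
 *	Settings:
 *		packet size:		32kB
 *		write type:		Packet
 *		packet type:		Fixed
 */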
static int pkt_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_seq_show, PDE_DATA(inode));
}

static const struct file_operations pkt_proc_fops = {
	.open	 = pkt_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release
};
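
/*
 * Attach the CD/DVD device identified by dev to this writer: take a
 * reference on it, start the per-device worker thread (kcdrwd) and
 * create the /proc entry.  Recursive setups, chained pktcdvd devices
 * and already-mapped devices are rejected.
 */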
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	struct block_device *bdev;

	if (pd->pkt_dev == dev) {
		pkt_err(pd, "recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			pkt_err(pd, "%s already setup\n",
				bdevname(pd2->bdev, b));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			pkt_err(pd, "can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
	if (ret)
		return ret;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		pkt_err(pd, "can't start kernel thread\n");
		ret = -ENOMEM;
		goto out_mem;
	}

	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return ret;
}
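
/*
 * ioctl handler for the pktcdvd block device itself: only a small
 * whitelist of CDROM/SCSI ioctls is forwarded to the underlying drive;
 * everything else is refused with -ENOTTY.
 */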
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		/* fallthru */
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
		break;
	default:
		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}
static const struct block_device_operations pktcdvd_ops = {
	.owner		= THIS_MODULE,
	.open		= pkt_open,
	.release	= pkt_close,
	.ioctl		= pkt_ioctl,
	.check_events	= pkt_check_events,
};

static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		pr_err("max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	/* Allocate a pktcdvd_device */
	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
						  sizeof(struct pkt_rb_node));
	if (!pd->rb_pool)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;
	disk = alloc_disk(1);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE;
	strcpy(disk->disk_name, pd->name);
	disk->devnode = pktcdvd_devnode;
	disk->private_data = pd;
	disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (!disk->queue)
		goto out_mem2;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_new_dev;

	/* inherit events of the host device */
	disk->events = pd->bdev->bd_disk->events;
	disk->async_events = pd->bdev->bd_disk->async_events;

	add_disk(disk);

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_new_dev:
	blk_cleanup_queue(disk->queue);
out_mem2:
	put_disk(disk);
out_mem:
	if (pd->rb_pool)
		mempool_destroy(pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	pr_err("setup of pktcdvd device failed\n");
	return ret;
}
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		pr_debug("dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}
	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	pkt_dbg(1, pd, "writer unmapped\n");

	del_gendisk(pd->disk);
	blk_cleanup_queue(pd->disk->queue);
	put_disk(pd->disk);

	mempool_destroy(pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
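
/*
 * ioctl handler for the misc control device (/dev/pktcdvd/control):
 * all setup, teardown and status operations arrive through a single
 * PACKET_CTRL_CMD ioctl carrying a struct pkt_ctrl_command.
 */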
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}

#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
};

static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};
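
/*
 * Illustrative userspace sketch (not part of this file) of how a tool
 * such as pktsetup could query the control node registered above:
 *
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	struct pkt_ctrl_command c = {
 *		.command   = PKT_CTRL_CMD_STATUS,
 *		.dev_index = 0,
 *	};
 *	if (fd >= 0 && ioctl(fd, PACKET_CTRL_CMD, &c) == 0)
 *		printf("%u writers supported\n", c.num_devices);
 *
 * For PKT_CTRL_CMD_SETUP the caller passes the host device number in
 * .dev (new_encode_dev() format) and receives the new pktcdvd device
 * number in .pkt_dev; SETUP and TEARDOWN require CAP_SYS_ADMIN.
 */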
static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (!psd_pool)
		return -ENOMEM;

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_destroy(psd_pool);
	return ret;
}
static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_destroy(psd_pool);
}

MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);