2 * f_mass_storage.c -- Mass Storage USB Composite Function
4 * Copyright (C) 2003-2008 Alan Stern
5 * Copyright (C) 2009 Samsung Electronics
6 * Author: Michal Nazarewicz <mina86@mina86.com>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") as published by the Free Software
24 * Foundation, either version 2 of that License or (at your option) any
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 * The Mass Storage Function acts as a USB Mass Storage device,
42 * appearing to the host as a disk drive or as a CD-ROM drive. In
43 * addition to providing an example of a genuinely useful composite
44 * function for a USB device, it also illustrates a technique of
45 * double-buffering for increased throughput.
47 * For more information about MSF and in particular its module
48 * parameters and sysfs interface read the
49 * <Documentation/usb/mass-storage.txt> file.
53 * MSF is configured by specifying a fsg_config structure. It has the
 * nluns	Number of LUNs the function has (anywhere from 1
 *		to FSG_MAX_LUNS, which is 8).
58 * luns An array of LUN configuration values. This
59 * should be filled for each LUN that
60 * function will include (ie. for "nluns"
61 * LUNs). Each element of the array has
62 * the following fields:
63 * ->filename The path to the backing file for the LUN.
64 * Required if LUN is not marked as
66 * ->ro Flag specifying access to the LUN shall be
67 * read-only. This is implied if CD-ROM
68 * emulation is enabled as well as when
69 * it was impossible to open "filename"
71 * ->removable Flag specifying that LUN shall be indicated as
73 * ->cdrom Flag specifying that LUN shall be reported as
75 * ->nofua Flag specifying that FUA flag in SCSI WRITE(10,12)
76 * commands for this LUN shall be ignored.
80 * release Information used as a reply to INQUIRY
81 * request. To use default set to NULL,
82 * NULL, 0xffff respectively. The first
83 * field should be 8 and the second 16
86 * can_stall Set to permit function to halt bulk endpoints.
87 * Disabled on some USB devices known not
88 * to work correctly. You should set it
91 * If "removable" is not set for a LUN then a backing file must be
92 * specified. If it is set, then NULL filename means the LUN's medium
93 * is not loaded (an empty string as "filename" in the fsg_config
94 * structure causes error). The CD-ROM emulation includes a single
95 * data track and no audio tracks; hence there need be only one
96 * backing file per LUN.
98 * This function is heavily based on "File-backed Storage Gadget" by
99 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
100 * Brownell. The driver's SCSI command interface was based on the
101 * "Information technology - Small Computer System Interface - 2"
102 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
103 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
104 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
105 * was based on the "Universal Serial Bus Mass Storage Class UFI
106 * Command Specification" document, Revision 1.0, December 14, 1998,
108 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
114 * The MSF is fairly straightforward. There is a main kernel
115 * thread that handles most of the work. Interrupt routines field
116 * callbacks from the controller driver: bulk- and interrupt-request
117 * completion notifications, endpoint-0 events, and disconnect events.
118 * Completion events are passed to the main thread by wakeup calls. Many
119 * ep0 requests are handled at interrupt time, but SetInterface,
120 * SetConfiguration, and device reset requests are forwarded to the
121 * thread in the form of "exceptions" using SIGUSR1 signals (since they
122 * should interrupt any ongoing file I/O operations).
124 * The thread's main routine implements the standard command/data/status
125 * parts of a SCSI interaction. It and its subroutines are full of tests
126 * for pending signals/exceptions -- all this polling is necessary since
127 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
128 * indication that the driver really wants to be running in userspace.)
129 * An important point is that so long as the thread is alive it keeps an
130 * open reference to the backing file. This will prevent unmounting
131 * the backing file's underlying filesystem and could cause problems
132 * during system shutdown, for example. To prevent such problems, the
133 * thread catches INT, TERM, and KILL signals and converts them into
136 * In normal operation the main thread is started during the gadget's
137 * fsg_bind() callback and stopped during fsg_unbind(). But it can
138 * also exit when it receives a signal, and there's no point leaving
139 * the gadget running when the thread is dead. As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
143 * To provide maximum throughput, the driver uses a circular pipeline of
144 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
145 * arbitrarily long; in practice the benefits don't justify having more
146 * than 2 stages (i.e., double buffering). But it helps to think of the
147 * pipeline as being a long one. Each buffer head contains a bulk-in and
148 * a bulk-out request pointer (since the buffer can be used for both
149 * output and input -- directions always are given from the host's
150 * point of view) as well as a pointer to the buffer and various state
153 * Use of the pipeline follows a simple protocol. There is a variable
154 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
155 * At any time that buffer head may still be in use from an earlier
156 * request, so each buffer head has a state variable indicating whether
157 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
158 * buffer head to be EMPTY, filling the buffer either by file I/O or by
159 * USB I/O (during which the buffer head is BUSY), and marking the buffer
160 * head FULL when the I/O is complete. Then the buffer will be emptied
161 * (again possibly by USB I/O, during which it is marked BUSY) and
162 * finally marked EMPTY again (possibly by a completion routine).
164 * A module parameter tells the driver to avoid stalling the bulk
165 * endpoints wherever the transport specification allows. This is
166 * necessary for some UDCs like the SuperH, which cannot reliably clear a
167 * halt on a bulk endpoint. However, under certain circumstances the
168 * Bulk-only specification requires a stall. In such cases the driver
169 * will halt the endpoint and set a flag indicating that it should clear
170 * the halt in software during the next device reset. Hopefully this
171 * will permit everything to work correctly. Furthermore, although the
172 * specification allows the bulk-out endpoint to halt when the host sends
173 * too much data, implementing this would cause an unavoidable race.
174 * The driver will always use the "no-stall" approach for OUT transfers.
176 * One subtle point concerns sending status-stage responses for ep0
177 * requests. Some of these requests, such as device reset, can involve
178 * interrupting an ongoing file I/O operation, which might take an
179 * arbitrarily long time. During that delay the host might give up on
180 * the original ep0 request and issue a new one. When that happens the
181 * driver should not notify the host about completion of the original
182 * request, as the host will no longer be waiting for it. So the driver
183 * assigns to each ep0 request a unique tag, and it keeps track of the
184 * tag value of the request associated with a long-running exception
185 * (device-reset, interface-change, or configuration-change). When the
186 * exception handler is finished, the status-stage response is submitted
187 * only if the current ep0 request tag is equal to the exception request
188 * tag. Thus only the most recently received ep0 request will get a
189 * status-stage response.
191 * Warning: This driver source file is too long. It ought to be split up
192 * into a header file plus about 3 separate .c files, to handle the details
193 * of the Gadget, USB Mass Storage, and SCSI protocols.
197 /* #define VERBOSE_DEBUG */
198 /* #define DUMP_MSGS */
200 #include <linux/blkdev.h>
201 #include <linux/completion.h>
202 #include <linux/dcache.h>
203 #include <linux/delay.h>
204 #include <linux/device.h>
205 #include <linux/fcntl.h>
206 #include <linux/file.h>
207 #include <linux/fs.h>
208 #include <linux/kref.h>
209 #include <linux/kthread.h>
210 #include <linux/limits.h>
211 #include <linux/rwsem.h>
212 #include <linux/slab.h>
213 #include <linux/spinlock.h>
214 #include <linux/string.h>
215 #include <linux/freezer.h>
217 #include <linux/usb/ch9.h>
218 #include <linux/usb/gadget.h>
219 #include <linux/usb/composite.h>
221 #include "gadget_chips.h"
224 /*------------------------------------------------------------------------*/
226 #define FSG_DRIVER_DESC "Mass Storage Function"
227 #define FSG_DRIVER_VERSION "2009/09/11"
229 static const char fsg_string_interface[] = "Mass Storage";
231 #include "storage_common.c"
234 /*-------------------------------------------------------------------------*/
/* FSF callback functions */
struct fsg_operations {
	/*
	 * Callback function to call when the thread exits.  If no
	 * callback is set, or if it returns a value lower than zero, MSF
	 * will force eject all LUNs it operates on (including those
	 * marked as non-removable or with the prevent_medium_removal flag
	 * set).
	 */
	int (*thread_exits)(struct fsg_common *common);
/* Data shared by all the FSG instances. */
	struct usb_gadget	*gadget;	/* controller we are bound to */
	struct usb_composite_dev *cdev;
	struct fsg_dev		*fsg, *new_fsg;	/* current / pending binding */
	wait_queue_head_t	fsg_wait;	/* wait for fsg to change */

	/* filesem protects: backing files in use */
	struct rw_semaphore	filesem;

	/* lock protects: state, all the req_busy's */
	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
	struct usb_request	*ep0req;	/* Copy of cdev->req */
	unsigned int		ep0_req_tag;	/* counts arriving ep0 setups */

	/* Circular pipeline of buffer heads (see file-top comment). */
	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	*buffhds;

	u8			cmnd[MAX_COMMAND_SIZE];	/* current SCSI CDB */

	/* NOTE(review): vendor addition; used as the GET_MAX_LUN reply -- confirm */
	unsigned int		board_support_luns;

	struct fsg_lun		*luns;
	struct fsg_lun		*curlun;	/* LUN addressed by current cmd */

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		/* For exception handling */
	unsigned int		exception_req_tag;

	enum data_direction	data_dir;
	u32			data_size_from_cmnd;

	unsigned int		can_stall:1;
	unsigned int		free_storage_on_release:1;
	unsigned int		phase_error:1;
	unsigned int		short_packet_received:1;
	unsigned int		bad_lun_okay:1;
	unsigned int		running:1;

	int			thread_wakeup_needed;	/* set under lock; see wakeup_thread() */
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Callback functions. */
	const struct fsg_operations *ops;
	/* Gadget's private data. */

	/*
	 * Vendor (8 chars), product (16 chars), release (4
	 * hexadecimal digits) and NUL byte
	 */
	char inquiry_string[8 + 16 + 4 + 1];

	/* NOTE(review): vendor addition -- deferred fsync work item; confirm usage */
	struct work_struct	fsync_work;
	/* Per-LUN configuration; presumably one entry per configured LUN. */
	struct fsg_lun_config {
		const char *filename;	/* backing file path; NULL = no medium */
	} luns[FSG_MAX_LUNS];

	/* Callback functions. */
	const struct fsg_operations *ops;
	/* Gadget's private data. */

	/* Strings used to build the INQUIRY response. */
	const char *vendor_name;	/* 8 characters or less */
	const char *product_name;	/* 16 characters or less */
	struct usb_function	function;	/* fsg_from_func() maps back from this */
	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
	struct fsg_common	*common;	/* shared state for all instances */

	u16			interface_number;

	unsigned int		bulk_in_enabled:1;
	unsigned int		bulk_out_enabled:1;

	unsigned long		atomic_bitflags;	/* test_bit()/set_bit() flags */
#define IGNORE_BULK_OUT 0

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
/*
 * Report whether common->fsg is set, logging an error that identifies
 * the calling function and line when it is NULL.
 */
static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);

/* Call-site wrapper so the log message points at the caller. */
#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
/* Map a struct usb_function back to its containing struct fsg_dev. */
static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
	return container_of(f, struct fsg_dev, function);

/* Signature for per-instance helper routines. */
typedef void (*fsg_routine_t)(struct fsg_dev *);
/* Nonzero while an exception (reset / config change / ...) is being handled. */
static int exception_in_progress(struct fsg_common *common)
	return common->state > FSG_STATE_IDLE;
/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
				    struct fsg_buffhd *bh, unsigned int length)
	/* Remember how much data we actually expect, before padding. */
	bh->bulk_out_intended_length = length;
	rem = length % common->bulk_out_maxpacket;
	/* Pad the queued length up to a whole number of packets. */
	length += common->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
394 /*-------------------------------------------------------------------------*/
/*
 * Halt the given endpoint, logging which of the two bulk endpoints
 * (if either) is being halted.
 */
static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
	if (ep == fsg->bulk_in)
	else if (ep == fsg->bulk_out)
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
411 /*-------------------------------------------------------------------------*/
413 /* These routines may be called in process context or in_irq */
/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
	smp_wmb();	/* ensure the write of bh->state is complete */
	/* Tell the main thread that something has happened */
	common->thread_wakeup_needed = 1;
	/* thread_task may be NULL before the thread starts or after it exits */
	if (common->thread_task)
		wake_up_process(common->thread_task);
425 static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
430 * Do nothing if a higher-priority exception is already in progress.
431 * If a lower-or-equal priority exception is in progress, preempt it
432 * and notify the main thread by sending it a signal.
434 printk("%s:state=%d,%d fsg=%p \n",__func__,common->state,new_state,common->new_fsg);
435 spin_lock_irqsave(&common->lock, flags);
436 if (common->state <= new_state) {
437 common->exception_req_tag = common->ep0_req_tag;
438 common->state = new_state;
439 if (common->thread_task)
440 send_sig_info(SIGUSR1, SEND_SIG_FORCED,
441 common->thread_task);
443 spin_unlock_irqrestore(&common->lock, flags);
447 /*-------------------------------------------------------------------------*/
/*
 * Submit the shared ep0 request.  Failures other than -ESHUTDOWN are
 * only logged; we can't do much more than wait for a reset.
 */
static int ep0_queue(struct fsg_common *common)
	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
	common->ep0->driver_data = common;
	if (rc != 0 && rc != -ESHUTDOWN) {
		/* We can't do much more than wait for a reset */
		WARNING(common, "error in submission: %s --> %d\n",
			common->ep0->name, rc);
464 /*-------------------------------------------------------------------------*/
466 /* Completion handlers. These always run in_irq. */
/*
 * Bulk-in (device-to-host) request completion; runs in interrupt
 * context.  Marks the buffer EMPTY again and wakes the main thread.
 */
static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
	struct fsg_common *common = ep->driver_data;
	struct fsg_buffhd *bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	spin_lock(&common->lock);
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(common);
	spin_unlock(&common->lock);
/*
 * Bulk-out (host-to-device) request completion; runs in interrupt
 * context.  Marks the buffer FULL and wakes the main thread.
 */
static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
	struct fsg_common *common = ep->driver_data;
	struct fsg_buffhd *bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	/* Compare against the intended (pre-padding) length, not req->length */
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	spin_lock(&common->lock);
	bh->state = BUF_STATE_FULL;
	wakeup_thread(common);
	spin_unlock(&common->lock);
/*
 * Handle class-specific ep0 requests for this interface: Bulk-Only
 * Mass Storage Reset and Get Max LUN.  Each arriving setup bumps
 * ep0_req_tag so stale status-stage responses can be suppressed
 * (see the file-top comment about ep0 request tags).
 */
static int fsg_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
	struct fsg_dev *fsg = fsg_from_func(f);
	struct usb_request *req = fsg->common->ep0req;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	if (!fsg_is_set(fsg->common))

	++fsg->common->ep0_req_tag;	/* Record arrival of a new request */

	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));

	switch (ctrl->bRequest) {

	case US_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
		if (w_index != fsg->interface_number || w_value != 0 ||

		/*
		 * Raise an exception to stop the current operation
		 * and reinitialize our state.
		 */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_RESET);
		return DELAYED_STATUS;

	case US_BULK_GET_MAX_LUN:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
		if (w_index != fsg->interface_number || w_value != 0 ||

		VDBG(fsg, "get max LUN\n");
		/* Reply is the highest LUN index (count - 1) */
		*(u8 *)req->buf = fsg->common->board_support_luns - 1;

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		return ep0_queue(fsg->common);

		"unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
		ctrl->bRequestType, ctrl->bRequest,
		le16_to_cpu(ctrl->wValue), w_index, w_length);
567 /*-------------------------------------------------------------------------*/
569 /* All the following routines run in process context */
/* Use this for bulk or interrupt transfers, not ep0 */
/*
 * Mark the buffer BUSY, queue the request on the endpoint, and log any
 * submission failure that is not an expected shutdown or zero-length
 * quirk.
 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
			   struct usb_request *req, int *pbusy,
			   enum fsg_buffer_state *state)
	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	spin_lock_irq(&fsg->common->lock);
	*state = BUF_STATE_BUSY;
	spin_unlock_irq(&fsg->common->lock);
	rc = usb_ep_queue(ep, req, GFP_KERNEL);
		*state = BUF_STATE_EMPTY;

		/* We can't do much more than wait for a reset */

		/*
		 * Note: currently the net2280 driver fails zero-length
		 * submissions if DMA is enabled.
		 */
		if (rc != -ESHUTDOWN &&
		    !(rc == -EOPNOTSUPP && req->length == 0))
			WARNING(fsg, "error in submission: %s --> %d\n",
/* Queue bh's bulk-in request; bails out if common->fsg has gone away. */
static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
	if (!fsg_is_set(common))
	start_transfer(common->fsg, common->fsg->bulk_in,
		       bh->inreq, &bh->inreq_busy, &bh->state);
/* Queue bh's bulk-out request; bails out if common->fsg has gone away. */
static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
	if (!fsg_is_set(common))
	start_transfer(common->fsg, common->fsg->bulk_out,
		       bh->outreq, &bh->outreq_busy, &bh->state);
/*
 * Put the main thread to sleep until wakeup_thread() runs or a signal
 * arrives.  On a pending signal it returns an error (presumably
 * -EINTR -- the return statements are not visible here; confirm).
 */
static int sleep_thread(struct fsg_common *common)
	/* Wait until a signal arrives or we are woken up */
	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current)) {
	if (common->thread_wakeup_needed)
	__set_current_state(TASK_RUNNING);
	common->thread_wakeup_needed = 0;
	smp_rmb();	/* ensure the latest bh->state is visible */
644 /*-------------------------------------------------------------------------*/
/*
 * Handle SCSI READ(6)/READ(10): read from the LUN's backing file into
 * the buffer pipeline and queue each full buffer on the bulk-in
 * endpoint.  On error, sets the LUN's sense data and position info.
 */
static int do_read(struct fsg_common *common)
	struct fsg_lun *curlun = common->curlun;
	struct fsg_buffhd *bh;
	loff_t file_offset, file_offset_tmp;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.  READ(6) packs the LBA into bytes 1-3.
	 */
	if (common->cmnd[0] == READ_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
		lba = get_unaligned_be32(&common->cmnd[2]);

	/*
	 * We allow DPO (Disable Page Out = don't save data in the
	 * cache) and FUA (Force Unit Access = don't read from the
	 * cache), but we don't implement them.
	 */
	if ((common->cmnd[1] & ~0x18) != 0) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;

	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;

	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		/* No default reply */

		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(common);

		/*
		 * If we were asked to read past the end of file,
		 * end with an empty buffer.
		 */
			SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				 (char __user *)bh->buf,
				 amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset, (int)nread);
		if (signal_pending(current))

			LDBG(curlun, "error in file read: %d\n", (int)nread);
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			/* Only whole blocks may be handed to the host */
			nread = round_down(nread, curlun->blksize);
		file_offset += nread;
		amount_left -= nread;
		common->residue -= nread;

		/*
		 * Except at the end of the transfer, nread will be
		 * equal to the buffer size, which is divisible by the
		 * bulk-in maxpacket size.
		 */
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
			curlun->info_valid = 1;

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		if (!start_in_transfer(common, bh))
			/* Don't know what to do if common->fsg is NULL */
		common->next_buffhd_to_fill = bh->next;

	return -EIO;		/* No default reply */
775 /*-------------------------------------------------------------------------*/
/*
 * Handle SCSI WRITE(6)/WRITE(10): queue bulk-out requests to collect
 * data from the host and drain full buffers into the backing file.
 * FUA is honored (unless the LUN has nofua set) by temporarily
 * switching the backing file to O_SYNC.
 */
static int do_write(struct fsg_common *common)
	struct fsg_lun *curlun = common->curlun;
	struct fsg_buffhd *bh;
	u32 amount_left_to_req, amount_left_to_write;
	loff_t usb_offset, file_offset, file_offset_tmp;

		curlun->sense_data = SS_WRITE_PROTECTED;

	spin_lock(&curlun->filp->f_lock);
	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
	spin_unlock(&curlun->filp->f_lock);

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.  WRITE(6) packs the LBA into bytes 1-3.
	 */
	if (common->cmnd[0] == WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
		lba = get_unaligned_be32(&common->cmnd[2]);

	/*
	 * We allow DPO (Disable Page Out = don't save data in the
	 * cache) and FUA (Force Unit Access = write directly to the
	 * medium).  We don't implement DPO; we implement FUA by
	 * performing synchronous output.
	 */
	if (common->cmnd[1] & ~0x18) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
	if (!curlun->nofua && (common->cmnd[1] & 0x08)) {	/* FUA */
		spin_lock(&curlun->filp->f_lock);
		curlun->filp->f_flags |= O_SYNC;
		spin_unlock(&curlun->filp->f_lock);

	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;

	/* Carry out the file writes */
	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/*
			 * Figure out how much we want to get:
			 * Try to get the remaining amount,
			 * but not more than the buffer size.
			 */
			amount = min(amount_left_to_req, FSG_BUFLEN);

			/* Beyond the end of the backing file? */
			if (usb_offset >= curlun->file_length) {
				SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->sense_data_info =
					usb_offset >> curlun->blkbits;
				curlun->info_valid = 1;

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh, amount);
			if (!start_out_transfer(common, bh))
				/* Dunno what to do if common->fsg is NULL */
			common->next_buffhd_to_fill = bh->next;

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;

			amount = bh->outreq->actual;
			if (curlun->file_length - file_offset < amount) {
				"write %u @ %llu beyond end %llu\n",
				amount, (unsigned long long)file_offset,
				(unsigned long long)curlun->file_length);
				amount = curlun->file_length - file_offset;

			/* Don't accept excess data.  The spec doesn't say
			 * what to do in this case.  We'll ignore the error.
			 */
			amount = min(amount, bh->bulk_out_intended_length);

			/* Don't write a partial block */
			amount = round_down(amount, curlun->blksize);

			/* Perform the write */
			file_offset_tmp = file_offset;
			nwritten = vfs_write(curlun->filp,
					     (char __user *)bh->buf,
					     amount, &file_offset_tmp);
			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
			      (unsigned long long)file_offset, (int)nwritten);
			if (signal_pending(current))
				return -EINTR;		/* Interrupted! */

				LDBG(curlun, "error in file write: %d\n",
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
				     (int)nwritten, amount);
				/* Count only whole blocks as written */
				nwritten = round_down(nwritten, curlun->blksize);
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;

			/* Did the host decide to stop early? */
			if (bh->outreq->actual < bh->bulk_out_intended_length) {
				common->short_packet_received = 1;

		/* Wait for something to happen */
		rc = sleep_thread(common);

	return -EIO;			/* No default reply */
965 /*-------------------------------------------------------------------------*/
/* Handle SCSI SYNCHRONIZE CACHE by fsync()ing the LUN's backing file. */
static int do_synchronize_cache(struct fsg_common *common)
	struct fsg_lun *curlun = common->curlun;

	/* We ignore the requested LBA and write out all file's
	 * dirty data buffers. */
	rc = fsg_lun_fsync_sub(curlun);
		curlun->sense_data = SS_WRITE_ERROR;
981 /*-------------------------------------------------------------------------*/
/* Drop all cached pages of the LUN's backing file from the page cache. */
static void invalidate_sub(struct fsg_lun *curlun)
	struct file *filp = curlun->filp;
	struct inode *inode = file_inode(filp);

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
/*
 * Handle SCSI VERIFY: flush dirty pages, invalidate the page cache,
 * then read the requested blocks back from the medium to confirm they
 * are readable.  Sense data is set on any failure.
 */
static int do_verify(struct fsg_common *common)
	struct fsg_lun *curlun = common->curlun;
	u32 verification_length;
	struct fsg_buffhd *bh = common->next_buffhd_to_fill;
	loff_t file_offset, file_offset_tmp;
	unsigned int amount;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	lba = get_unaligned_be32(&common->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;

	/*
	 * We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it.
	 */
	if (common->cmnd[1] & ~0x10) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;

	verification_length = get_unaligned_be16(&common->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify */
	amount_left = verification_length << curlun->blkbits;
	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Write out all the dirty buffers before invalidating them */
	fsg_lun_fsync_sub(curlun);
	if (signal_pending(current))

	invalidate_sub(curlun);
	if (signal_pending(current))

	/* Just try to read the requested blocks */
	while (amount_left > 0) {
		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);
			curlun->sense_data =
				SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
			curlun->info_valid = 1;

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				 (char __user *) bh->buf,
				 amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long) file_offset,
		if (signal_pending(current))

			LDBG(curlun, "error in file verify: %d\n", (int)nread);
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
			     (int)nread, amount);
			/* Only whole blocks count as verified */
			nread = round_down(nread, curlun->blksize);
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
		file_offset += nread;
		amount_left -= nread;
1093 /*-------------------------------------------------------------------------*/
/*
 * Handle SCSI INQUIRY: build the standard 36-byte INQUIRY response in
 * bh->buf.  A request to an unsupported LUN still gets a reply, with
 * the "unsupported, no device-type" peripheral qualifier.
 */
static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
	struct fsg_lun *curlun = common->curlun;
	u8 *buf = (u8 *) bh->buf;

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		buf[0] = 0x7f;		/* Unsupported, no device-type */
		buf[4] = 31;		/* Additional length */

	buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
	buf[1] = curlun->removable ? 0x80 : 0;	/* RMB bit */
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	buf[5] = 0;		/* No special options */
	/* Vendor, product and release strings start at byte 8 */
	memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
/*
 * do_request_sense -- build a fixed-format (0x70) REQUEST SENSE response.
 * Reports and then clears the LUN's pending sense data; for an unsupported
 * LUN it reports LOGICAL UNIT NOT SUPPORTED and sets bad_lun_okay.
 * NOTE(review): elided lines (the #if 0/#endif around the option-b code,
 * buffer memset and return are not visible); code kept verbatim.
 */
1120 static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1122 struct fsg_lun *curlun = common->curlun;
1123 u8 *buf = (u8 *) bh->buf;
1128 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1130 * If a REQUEST SENSE command is received from an initiator
1131 * with a pending unit attention condition (before the target
1132 * generates the contingent allegiance condition), then the
1133 * target shall either:
1134 * a) report any pending sense data and preserve the unit
1135 * attention condition on the logical unit, or,
1136 * b) report the unit attention condition, may discard any
1137 * pending sense data, and clear the unit attention
1138 * condition on the logical unit for that initiator.
1140 * FSG normally uses option a); enable this code to use option b).
1143 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1144 curlun->sense_data = curlun->unit_attention_data;
1145 curlun->unit_attention_data = SS_NO_SENSE;
1149 if (!curlun) { /* Unsupported LUNs are okay */
1150 common->bad_lun_okay = 1;
1151 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1155 sd = curlun->sense_data;
1156 sdinfo = curlun->sense_data_info;
1157 valid = curlun->info_valid << 7;
/* Sense data is self-clearing: reading it resets the LUN's state */
1158 curlun->sense_data = SS_NO_SENSE;
1159 curlun->sense_data_info = 0;
1160 curlun->info_valid = 0;
1164 buf[0] = valid | 0x70; /* Valid, current error */
1166 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
1167 buf[7] = 18 - 8; /* Additional sense length */
/*
 * do_read_capacity -- answer READ CAPACITY(10): last LBA and block size,
 * both big-endian. PMI must be 0 or 1, and LBA must be 0 when PMI is 0.
 * NOTE(review): elided lines (error return after setting sense data, final
 * return of the 8-byte length) are not visible; code kept verbatim.
 */
1173 static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1175 struct fsg_lun *curlun = common->curlun;
1176 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1177 int pmi = common->cmnd[8];
1178 u8 *buf = (u8 *)bh->buf;
1180 /* Check the PMI and LBA fields */
1181 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1182 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1186 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1187 /* Max logical block */
1188 put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
/*
 * do_read_header -- answer the CD-ROM READ HEADER command: a mode-1 data
 * block header with the requested address in LBA or MSF form.
 * NOTE(review): elided lines (error returns, buffer memset, final return)
 * are not visible; code kept verbatim.
 */
1192 static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1194 struct fsg_lun *curlun = common->curlun;
1195 int msf = common->cmnd[1] & 0x02;
1196 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1197 u8 *buf = (u8 *)bh->buf;
1199 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */
1200 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1203 if (lba >= curlun->num_sectors) {
1204 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1209 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1210 store_cdrom_address(&buf[4], msf, lba);
/*
 * do_read_toc -- answer the CD-ROM READ TOC/PMA/ATIP command.
 * With CONFIG_USB_SPRD_DWC the response is one of three canned TOC byte
 * tables chosen by the requested transfer size and the MSF bit, truncated
 * to data_size_from_cmnd; otherwise a minimal single-track TOC is built.
 * NOTE(review): the canned tables encode fixed track addresses -- presumably
 * tuned for a specific host workaround; verify against the MMC-5 READ TOC
 * description. Elided lines (declarations of rc/data, else branch markers,
 * returns) are not visible; code kept verbatim.
 */
1214 static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1216 struct fsg_lun *curlun = common->curlun;
1217 int msf = common->cmnd[1] & 0x02;
1218 int start_track = common->cmnd[6];
1219 u8 *buf = (u8 *)bh->buf;
1220 #ifdef CONFIG_USB_SPRD_DWC
1221 // Seen in MMC5RC01 6.32 READ TOC/PMA/ATIP Command
1222 u8 toc_response_data1[] = {
1223 0x00, 0x12, 0x01, 0x01, 0x00, 0x14, 0x01, 0x00,
1224 0x00, 0x00, 0x00, 0x00
1226 u8 toc_response_data2[] = {
1227 0x00, 0x2e, 0x01, 0x01, 0x01, 0x14, 0x00, 0xa0,
1228 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01,
1229 0x14, 0x00, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x01,
1230 0x00, 0x00, 0x01, 0x14, 0x00, 0xa2, 0x00, 0x00,
1231 0x00, 0x00, 0x09, 0x0c, 0x94, 0x14, 0xf2, 0x00,
1232 0xa8, 0x14, 0xf2, 0x00, 0xbc, 0x14, 0xf2, 0x00
1234 u8 toc_response_data3[] = {
1235 0x00, 0x2e, 0x01, 0x01, 0x01, 0x14, 0x00, 0xa0,
1236 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01,
1237 0x14, 0x00, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x01,
1238 0x00, 0x00, 0x01, 0x14, 0x00, 0xa2, 0x00, 0x00,
1239 0x00, 0x00, 0x09, 0x0c, 0x94, 0x14, 0xf2, 0x00,
1240 0xa8, 0x14, 0xf2, 0x00, 0xbc, 0x14, 0xf2, 0x00,
1241 0xd0, 0x14, 0xf2, 0x00, 0xe4, 0x14, 0xf2, 0x00,
1242 0x12, 0x00, 0x12, 0x00, 0x12, 0x00, 0x12, 0x00,
1243 0x12, 0x00, 0x34, 0x35, 0x33, 0x32, 0x33, 0x32,
1244 0x33, 0x38, 0x00, 0x00, 0xf0, 0x00, 0x05, 0x00,
1245 0x00, 0x00, 0x00, 0x0a
/* Pick the canned response: 12-byte request -> short table, else by MSF */
1249 if(common->data_size_from_cmnd == 0xc){
1250 rc = sizeof toc_response_data1;
1251 data = toc_response_data1;
1252 }else if(msf == 0x1){
1253 rc = sizeof toc_response_data2;
1254 data = toc_response_data2;
1256 rc = sizeof toc_response_data3;
1257 data = toc_response_data3;
/* Never copy more than the host asked for */
1259 rc = rc < common->data_size_from_cmnd ? rc : common->data_size_from_cmnd;
1260 memcpy(buf, data, rc);
/* Non-SPRD path: build a minimal single-track TOC by hand */
1264 if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1266 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1271 buf[1] = (20-2); /* TOC data length */
1272 buf[2] = 1; /* First track number */
1273 buf[3] = 1; /* Last track number */
1274 buf[5] = 0x16; /* Data track, copying allowed */
1275 buf[6] = 0x01; /* Only track is number 1 */
1276 store_cdrom_address(&buf[8], msf, 0);
1278 buf[13] = 0x16; /* Lead-out track is data */
1279 buf[14] = 0xAA; /* Lead-out track number */
1280 store_cdrom_address(&buf[16], msf, curlun->num_sectors);
/*
 * do_mode_sense -- answer MODE SENSE(6)/MODE SENSE(10).
 * Only the Caching mode page (0x08) is supported; the header advertises
 * only the write-protect bit, no block descriptors.
 * NOTE(review): elided lines (declarations of pc/page_code/valid_page/len/
 * limit/buf0, buffer advancement, returns) are not visible; code kept
 * verbatim -- the len/limit bookkeeping depends on those missing lines.
 */
1286 static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1288 struct fsg_lun *curlun = common->curlun;
1289 int mscmnd = common->cmnd[0];
1290 u8 *buf = (u8 *) bh->buf;
1293 int changeable_values, all_pages;
1297 if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */
1298 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1301 pc = common->cmnd[2] >> 6;
1302 page_code = common->cmnd[2] & 0x3f;
/* PC=3 (saved values) is not supported */
1304 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1307 changeable_values = (pc == 1);
1308 all_pages = (page_code == 0x3f);
1311 * Write the mode parameter header. Fixed values are: default
1312 * medium type, no cache control (DPOFUA), and no block descriptors.
1313 * The only variable value is the WriteProtect bit. We will fill in
1314 * the mode data length later.
1317 if (mscmnd == MODE_SENSE) {
1318 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1321 } else { /* MODE_SENSE_10 */
1322 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1324 limit = 65535; /* Should really be FSG_BUFLEN */
1327 /* No block descriptors */
1330 * The mode pages, in numerical order. The only page we support
1331 * is the Caching page.
1333 if (page_code == 0x08 || all_pages) {
1335 buf[0] = 0x08; /* Page code */
1336 buf[1] = 10; /* Page length */
1337 memset(buf+2, 0, 10); /* None of the fields are changeable */
1339 if (!changeable_values) {
1340 buf[2] = 0x04; /* Write cache enable, */
1341 /* Read cache not disabled */
1342 /* No cache retention priorities */
1343 put_unaligned_be16(0xffff, &buf[4]);
1344 /* Don't disable prefetch */
1345 /* Minimum prefetch = 0 */
1346 put_unaligned_be16(0xffff, &buf[8]);
1347 /* Maximum prefetch */
1348 put_unaligned_be16(0xffff, &buf[10]);
1349 /* Maximum prefetch ceiling */
1355 * Check that a valid page was requested and the mode data length
1359 if (!valid_page || len > limit) {
1360 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1364 /* Store the mode data length */
1365 if (mscmnd == MODE_SENSE)
1368 put_unaligned_be16(len - 2, buf0);
/*
 * do_start_stop -- handle START STOP UNIT for removable LUNs.
 * Start just checks the medium is present; stop with LoEj set closes the
 * backing file (ejects) unless PREVENT MEDIUM REMOVAL is in force.
 * NOTE(review): elided lines (the !curlun branch, loej/start declarations,
 * returns) are not visible; code kept verbatim.
 */
1372 static int do_start_stop(struct fsg_common *common)
1374 struct fsg_lun *curlun = common->curlun;
1379 } else if (!curlun->removable) {
1380 curlun->sense_data = SS_INVALID_COMMAND;
1382 } else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
1383 (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
1384 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1388 loej = common->cmnd[4] & 0x02;
1389 start = common->cmnd[4] & 0x01;
1392 * Our emulation doesn't support mounting; the medium is
1393 * available for use as soon as it is loaded.
1396 if (!fsg_lun_is_open(curlun)) {
1397 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1403 /* Are we allowed to unload the media? */
1404 if (curlun->prevent_medium_removal) {
1405 LDBG(curlun, "unload attempt prevented\n");
1406 curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
/* Eject: upgrade to the write lock before closing the backing file */
1413 up_read(&common->filesem);
1414 down_write(&common->filesem);
1415 fsg_lun_close(curlun);
1416 up_write(&common->filesem);
1417 down_read(&common->filesem);
/*
 * do_prevent_allow -- handle PREVENT ALLOW MEDIUM REMOVAL.
 * Records the prevent flag; on the prevent->allow transition the backing
 * file is synced (directly, or via the fsync work item on this build).
 * NOTE(review): elided lines (returns and the #else/#endif pairing around
 * the fsync calls) are not visible; code kept verbatim.
 */
1422 static int do_prevent_allow(struct fsg_common *common)
1424 struct fsg_lun *curlun = common->curlun;
1427 if (!common->curlun) {
1429 } else if (!common->curlun->removable) {
1430 common->curlun->sense_data = SS_INVALID_COMMAND;
1434 prevent = common->cmnd[4] & 0x01;
1435 if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
1436 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
/* Flush dirty data before the host is allowed to remove the medium */
1440 if (!curlun->nofua && curlun->prevent_medium_removal && !prevent)
1441 fsg_lun_fsync_sub(curlun);
1443 schedule_work(&common->fsync_work);
1445 curlun->prevent_medium_removal = prevent;
/*
 * do_fsync -- workqueue handler that syncs the current LUN's backing file.
 * NOTE(review): elided lines (the actual sync call between the two printks,
 * the use of the static 'syncing' guard) are not visible; the function-local
 * static suggests a re-entrancy guard -- confirm against the full source.
 */
1449 static void do_fsync(struct work_struct *work)
1451 struct fsg_common *common =
1452 container_of(work, struct fsg_common, fsync_work);
1454 struct file *filp = common->curlun->filp;
1455 static int syncing = 0;
/* Nothing to sync for read-only or unbacked LUNs */
1457 if (common->curlun->ro || !filp)
1462 printk("ums sync 1\n");
1464 printk("ums sync 0\n");
/*
 * do_read_format_capacities -- answer READ FORMAT CAPACITIES with a single
 * Current/Maximum Capacity descriptor (block count + block length).
 * NOTE(review): an elided line between 1476 and 1479 presumably advances
 * buf past the 4-byte capacity-list header -- otherwise buf[4] = 0x02 would
 * clobber the block length just stored at &buf[4]; confirm in full source.
 */
1469 static int do_read_format_capacities(struct fsg_common *common,
1470 struct fsg_buffhd *bh)
1472 struct fsg_lun *curlun = common->curlun;
1473 u8 *buf = (u8 *) bh->buf;
1475 buf[0] = buf[1] = buf[2] = 0;
1476 buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
1479 put_unaligned_be32(curlun->num_sectors, &buf[0]);
1480 /* Number of blocks */
1481 put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
1482 buf[4] = 0x02; /* Current capacity */
/*
 * do_mode_select -- MODE SELECT is not implemented; report INVALID COMMAND.
 * NOTE(review): the error-return lines are elided from this listing.
 */
1486 static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1488 struct fsg_lun *curlun = common->curlun;
1490 /* We don't support MODE SELECT */
1492 curlun->sense_data = SS_INVALID_COMMAND;
1497 /*-------------------------------------------------------------------------*/
/*
 * halt_bulk_in_endpoint -- set HALT on the bulk-in endpoint, retrying every
 * 100 ms while the controller returns -EAGAIN (endpoint still busy).
 * NOTE(review): loop structure and return lines are elided; code verbatim.
 */
1499 static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1503 rc = fsg_set_halt(fsg, fsg->bulk_in);
1505 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1507 if (rc != -EAGAIN) {
1508 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1513 /* Wait for a short time and then try again */
1514 if (msleep_interruptible(100) != 0)
1516 rc = usb_ep_set_halt(fsg->bulk_in);
/*
 * wedge_bulk_in_endpoint -- wedge (halt until reset, ignoring Clear-Feature)
 * the bulk-in endpoint, retrying every 100 ms while busy (-EAGAIN).
 * NOTE(review): loop structure and return lines are elided; code verbatim.
 */
1521 static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1525 DBG(fsg, "bulk-in set wedge\n");
1526 rc = usb_ep_set_wedge(fsg->bulk_in);
1528 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1530 if (rc != -EAGAIN) {
1531 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1536 /* Wait for a short time and then try again */
1537 if (msleep_interruptible(100) != 0)
1539 rc = usb_ep_set_wedge(fsg->bulk_out);
/*
 * throw_away_data -- drain and discard host data we cannot use: recycle
 * filled buffers, keep OUT requests queued while usb_amount_left remains,
 * and abort on a short packet or transfer error.
 * NOTE(review): elided lines (declarations, continue statements, returns)
 * are not visible; code kept verbatim.
 */
1544 static int throw_away_data(struct fsg_common *common)
1546 struct fsg_buffhd *bh;
1550 for (bh = common->next_buffhd_to_drain;
1551 bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
1552 bh = common->next_buffhd_to_drain) {
1554 /* Throw away the data in a filled buffer */
1555 if (bh->state == BUF_STATE_FULL) {
1557 bh->state = BUF_STATE_EMPTY;
1558 common->next_buffhd_to_drain = bh->next;
1560 /* A short packet or an error ends everything */
1561 if (bh->outreq->actual < bh->bulk_out_intended_length ||
1562 bh->outreq->status != 0) {
1563 raise_exception(common,
1564 FSG_STATE_ABORT_BULK_OUT);
1570 /* Try to submit another request if we need one */
1571 bh = common->next_buffhd_to_fill;
1572 if (bh->state == BUF_STATE_EMPTY
1573 && common->usb_amount_left > 0) {
1574 amount = min(common->usb_amount_left, FSG_BUFLEN);
1577 * Except at the end of the transfer, amount will be
1578 * equal to the buffer size, which is divisible by
1579 * the bulk-out maxpacket size.
1581 set_bulk_out_req_length(common, bh, amount);
1582 if (!start_out_transfer(common, bh))
1583 /* Dunno what to do if common->fsg is NULL */
1585 common->next_buffhd_to_fill = bh->next;
1586 common->usb_amount_left -= amount;
1590 /* Otherwise wait for something to happen */
1591 rc = sleep_thread(common);
/*
 * finish_reply -- complete the data phase of the current command according
 * to common->data_dir: send the last buffer (padding/stalling per the
 * Bulk-Only Transport rules) for IN, or consume/discard leftover data for
 * OUT; stall both pipes when the direction is unknown.
 * NOTE(review): elided lines (rc declaration, returns, several case/brace
 * lines, #else/#endif pairing of the SPRD branch) are not visible; code
 * kept verbatim.
 */
1598 static int finish_reply(struct fsg_common *common)
1600 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
1603 switch (common->data_dir) {
1605 break; /* Nothing to send */
1608 * If we don't know whether the host wants to read or write,
1609 * this must be CB or CBI with an unknown command. We mustn't
1610 * try to send or receive any data. So stall both bulk pipes
1611 * if we can and wait for a reset.
1613 case DATA_DIR_UNKNOWN:
1614 if (!common->can_stall) {
1616 } else if (fsg_is_set(common)) {
1617 fsg_set_halt(common->fsg, common->fsg->bulk_out);
1618 rc = halt_bulk_in_endpoint(common->fsg);
1620 /* Don't know what to do if common->fsg is NULL */
1625 /* All but the last buffer of data must have already been sent */
1626 case DATA_DIR_TO_HOST:
1627 if (common->data_size == 0) {
1628 /* Nothing to send */
1630 /* Don't know what to do if common->fsg is NULL */
1631 } else if (!fsg_is_set(common)) {
1634 /* If there's no residue, simply send the last buffer */
1635 } else if (common->residue == 0) {
1636 bh->inreq->zero = 0;
1637 if (!start_in_transfer(common, bh))
1639 common->next_buffhd_to_fill = bh->next;
1642 * For Bulk-only, mark the end of the data with a short
1643 * packet. If we are allowed to stall, halt the bulk-in
1644 * endpoint. (Note: This violates the Bulk-Only Transport
1645 * specification, which requires us to pad the data if we
1646 * don't halt the endpoint. Presumably nobody will mind.)
1649 bh->inreq->zero = 1;
1650 #ifdef CONFIG_USB_SPRD_DWC
/* SPRD quirk: on a failed command (except INQUIRY/REQUEST SENSE),
 * stall IN and drop the buffer instead of sending short data */
1651 if (common->curlun->sense_data && common->can_stall
1652 && common->cmnd[0] != INQUIRY && common->cmnd[0] != REQUEST_SENSE){
1653 rc = halt_bulk_in_endpoint(common->fsg);
1655 bh->state = BUF_STATE_EMPTY;
1656 common->next_buffhd_to_fill = bh->next;
1657 common->state = FSG_STATE_IDLE;
1661 if (!start_in_transfer(common, bh))
1663 common->next_buffhd_to_fill = bh->next;
1664 if (common->can_stall)
1665 rc = halt_bulk_in_endpoint(common->fsg);
1670 * We have processed all we want from the data the host has sent.
1671 * There may still be outstanding bulk-out requests.
1673 case DATA_DIR_FROM_HOST:
1674 if (common->residue == 0) {
1675 /* Nothing to receive */
1677 /* Did the host stop sending unexpectedly early? */
1678 } else if (common->short_packet_received) {
1679 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1683 * We haven't processed all the incoming data. Even though
1684 * we may be allowed to stall, doing so would cause a race.
1685 * The controller may already have ACK'ed all the remaining
1686 * bulk-out packets, in which case the host wouldn't see a
1687 * STALL. Not realizing the endpoint was halted, it wouldn't
1688 * clear the halt -- leading to problems later on.
1691 } else if (common->can_stall) {
1692 if (fsg_is_set(common))
1693 fsg_set_halt(common->fsg,
1694 common->fsg->bulk_out);
1695 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1700 * We can't stall. Read in the excess data and throw it
1704 rc = throw_away_data(common);
/*
 * send_status -- build and queue the Bulk-only Command Status Wrapper (CSW)
 * for the just-completed command: OK, FAIL (with sense data), or PHASE error.
 * NOTE(review): elided lines (sd/sdinfo declarations, rc checks in the wait
 * loop, returns) are not visible; code kept verbatim.
 */
1711 static int send_status(struct fsg_common *common)
1713 struct fsg_lun *curlun = common->curlun;
1714 struct fsg_buffhd *bh;
1715 struct bulk_cs_wrap *csw;
1717 u8 status = US_BULK_STAT_OK;
1720 /* Wait for the next buffer to become available */
1721 bh = common->next_buffhd_to_fill;
1722 while (bh->state != BUF_STATE_EMPTY) {
1723 rc = sleep_thread(common);
1729 sd = curlun->sense_data;
1730 sdinfo = curlun->sense_data_info;
1731 } else if (common->bad_lun_okay)
1734 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1736 if (common->phase_error) {
1737 DBG(common, "sending phase-error status\n");
1738 status = US_BULK_STAT_PHASE;
1739 sd = SS_INVALID_COMMAND;
1740 } else if (sd != SS_NO_SENSE) {
1741 DBG(common, "sending command-failure status\n");
1742 status = US_BULK_STAT_FAIL;
1743 VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1745 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1748 /* Store and send the Bulk-only CSW */
1749 csw = (void *)bh->buf;
1751 csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
1752 csw->Tag = common->tag;
1753 csw->Residue = cpu_to_le32(common->residue);
1754 csw->Status = status;
1756 bh->inreq->length = US_BULK_CS_WRAP_LEN;
1757 bh->inreq->zero = 0;
1758 if (!start_in_transfer(common, bh))
1759 /* Don't know what to do if common->fsg is NULL */
1762 common->next_buffhd_to_fill = bh->next;
1767 /*-------------------------------------------------------------------------*/
1770 * Check whether the command is properly formed and whether its data size
1771 * and direction agree with the values we already have.
/*
 * Validates CDB length, LUN, data direction and non-zero byte mask against
 * the CBW; sets phase_error / sense data and clears stale sense state.
 * NOTE(review): elided lines (declarations of i/hdlen, several returns and
 * else branches) are not visible; code kept verbatim.
 */
1773 static int check_command(struct fsg_common *common, int cmnd_size,
1774 enum data_direction data_dir, unsigned int mask,
1775 int needs_medium, const char *name)
1778 unsigned int lun = common->cmnd[1] >> 5;
1779 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1781 struct fsg_lun *curlun;
1784 if (common->data_dir != DATA_DIR_UNKNOWN)
1785 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1787 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1788 name, cmnd_size, dirletter[(int) data_dir],
1789 common->data_size_from_cmnd, common->cmnd_size, hdlen);
1792 * We can't reply at all until we know the correct data direction
1795 if (common->data_size_from_cmnd == 0)
1796 data_dir = DATA_DIR_NONE;
1797 if (common->data_size < common->data_size_from_cmnd) {
1799 * Host data size < Device data size is a phase error.
1800 * Carry out the command, but only transfer as much as
1803 common->data_size_from_cmnd = common->data_size;
1804 common->phase_error = 1;
1806 common->residue = common->data_size;
1807 common->usb_amount_left = common->data_size;
1809 /* Conflicting data directions is a phase error */
1810 if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
1811 common->phase_error = 1;
1815 /* Verify the length of the command itself */
1816 if (cmnd_size != common->cmnd_size) {
1819 * Special case workaround: There are plenty of buggy SCSI
1820 * implementations. Many have issues with cbw->Length
1821 * field passing a wrong command size. For those cases we
1822 * always try to work around the problem by using the length
1823 * sent by the host side provided it is at least as large
1824 * as the correct command length.
1825 * Examples of such cases would be MS-Windows, which issues
1826 * REQUEST SENSE with cbw->Length == 12 where it should
1827 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1828 * REQUEST SENSE with cbw->Length == 10 where it should
1831 if (cmnd_size <= common->cmnd_size) {
1832 DBG(common, "%s is buggy! Expected length %d "
1833 "but we got %d\n", name,
1834 cmnd_size, common->cmnd_size);
1835 cmnd_size = common->cmnd_size;
1837 common->phase_error = 1;
1842 /* Check that the LUN values are consistent */
1843 if (common->lun != lun)
1844 DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n",
1848 curlun = common->curlun;
/* Any new command (except REQUEST SENSE) invalidates old sense data */
1850 if (common->cmnd[0] != REQUEST_SENSE) {
1851 curlun->sense_data = SS_NO_SENSE;
1852 curlun->sense_data_info = 0;
1853 curlun->info_valid = 0;
1856 common->bad_lun_okay = 0;
1859 * INQUIRY and REQUEST SENSE commands are explicitly allowed
1860 * to use unsupported LUNs; all others may not.
1862 if (common->cmnd[0] != INQUIRY &&
1863 common->cmnd[0] != REQUEST_SENSE) {
1864 DBG(common, "unsupported LUN %u\n", common->lun);
1870 * If a unit attention condition exists, only INQUIRY and
1871 * REQUEST SENSE commands are allowed; anything else must fail.
1873 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1874 common->cmnd[0] != INQUIRY &&
1875 common->cmnd[0] != REQUEST_SENSE) {
1876 curlun->sense_data = curlun->unit_attention_data;
1877 curlun->unit_attention_data = SS_NO_SENSE;
1881 /* Check that only command bytes listed in the mask are non-zero */
1882 common->cmnd[1] &= 0x1f; /* Mask away the LUN */
1883 for (i = 1; i < cmnd_size; ++i) {
1884 if (common->cmnd[i] && !(mask & (1 << i))) {
1886 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1891 /* If the medium isn't mounted and the command needs to access
1892 * it, return an error. */
1893 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
1894 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1901 /* wrapper of check_command for data size in blocks handling */
/*
 * Converts data_size_from_cmnd from blocks to bytes (shift by the LUN's
 * block-size bits) before delegating to check_command. Used by the
 * READ/WRITE command cases whose CDB length field counts blocks.
 */
1902 static int check_command_size_in_blocks(struct fsg_common *common,
1903 int cmnd_size, enum data_direction data_dir,
1904 unsigned int mask, int needs_medium, const char *name)
1907 common->data_size_from_cmnd <<= common->curlun->blkbits;
1908 return check_command(common, cmnd_size, data_dir,
1909 mask, needs_medium, name);
/*
 * do_scsi_command -- main SCSI dispatcher: waits for a free buffer, then
 * validates (check_command[_size_in_blocks]) and executes the command in
 * common->cmnd under the filesem read lock, finally sizing the reply buffer
 * for finish_reply().
 * NOTE(review): elided lines (case labels, `if (reply == 0)` guards before
 * each do_* call, break statements, default case, final return) are not
 * visible; code kept verbatim.
 */
1912 static int do_scsi_command(struct fsg_common *common)
1914 struct fsg_buffhd *bh;
1916 int reply = -EINVAL;
1918 static char unknown[16];
1922 /* Wait for the next buffer to become available for data or status */
1923 bh = common->next_buffhd_to_fill;
1924 common->next_buffhd_to_drain = bh;
1925 while (bh->state != BUF_STATE_EMPTY) {
1926 rc = sleep_thread(common);
1930 common->phase_error = 0;
1931 common->short_packet_received = 0;
1933 down_read(&common->filesem); /* We're using the backing file */
1934 switch (common->cmnd[0]) {
1937 common->data_size_from_cmnd = common->cmnd[4];
1938 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1942 reply = do_inquiry(common, bh);
1946 common->data_size_from_cmnd = common->cmnd[4];
1947 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1951 reply = do_mode_select(common, bh);
1954 case MODE_SELECT_10:
1955 common->data_size_from_cmnd =
1956 get_unaligned_be16(&common->cmnd[7]);
1957 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1961 reply = do_mode_select(common, bh);
1965 common->data_size_from_cmnd = common->cmnd[4];
1966 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1967 (1<<1) | (1<<2) | (1<<4), 0,
1970 reply = do_mode_sense(common, bh);
1974 common->data_size_from_cmnd =
1975 get_unaligned_be16(&common->cmnd[7]);
1976 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1977 (1<<1) | (1<<2) | (3<<7), 0,
1980 reply = do_mode_sense(common, bh);
1983 case ALLOW_MEDIUM_REMOVAL:
1984 common->data_size_from_cmnd = 0;
1985 reply = check_command(common, 6, DATA_DIR_NONE,
1987 "PREVENT-ALLOW MEDIUM REMOVAL");
1989 reply = do_prevent_allow(common);
/* READ(6): transfer length of 0 means 256 blocks per SCSI-2 */
1993 i = common->cmnd[4];
1994 common->data_size_from_cmnd = (i == 0) ? 256 : i;
1995 reply = check_command_size_in_blocks(common, 6,
2000 reply = do_read(common);
2004 common->data_size_from_cmnd =
2005 get_unaligned_be16(&common->cmnd[7]);
2006 reply = check_command_size_in_blocks(common, 10,
2008 (1<<1) | (0xf<<2) | (3<<7), 1,
2011 reply = do_read(common);
2015 common->data_size_from_cmnd =
2016 get_unaligned_be32(&common->cmnd[6]);
2017 reply = check_command_size_in_blocks(common, 12,
2019 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2022 reply = do_read(common);
2026 common->data_size_from_cmnd = 8;
2027 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2028 (0xf<<2) | (1<<8), 1,
2031 reply = do_read_capacity(common, bh);
/* READ HEADER / READ TOC are only meaningful on CD-ROM LUNs */
2035 if (!common->curlun || !common->curlun->cdrom)
2037 common->data_size_from_cmnd =
2038 get_unaligned_be16(&common->cmnd[7]);
2039 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2040 (3<<7) | (0x1f<<1), 1,
2043 reply = do_read_header(common, bh);
2047 if (!common->curlun || !common->curlun->cdrom)
2049 common->data_size_from_cmnd =
2050 get_unaligned_be16(&common->cmnd[7]);
2051 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2052 (0xf<<6) | (3<<1), 1,
2055 reply = do_read_toc(common, bh);
2058 case READ_FORMAT_CAPACITIES:
2059 common->data_size_from_cmnd =
2060 get_unaligned_be16(&common->cmnd[7]);
2061 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2063 "READ FORMAT CAPACITIES");
2065 reply = do_read_format_capacities(common, bh);
2069 common->data_size_from_cmnd = common->cmnd[4];
2070 reply = check_command(common, 6, DATA_DIR_TO_HOST,
2074 reply = do_request_sense(common, bh);
2078 common->data_size_from_cmnd = 0;
2079 reply = check_command(common, 6, DATA_DIR_NONE,
2083 reply = do_start_stop(common);
2086 case SYNCHRONIZE_CACHE:
2087 common->data_size_from_cmnd = 0;
2088 reply = check_command(common, 10, DATA_DIR_NONE,
2089 (0xf<<2) | (3<<7), 1,
2090 "SYNCHRONIZE CACHE");
2092 reply = do_synchronize_cache(common);
2095 case TEST_UNIT_READY:
2096 common->data_size_from_cmnd = 0;
2097 reply = check_command(common, 6, DATA_DIR_NONE,
2103 * Although optional, this command is used by MS-Windows. We
2104 * support a minimal version: BytChk must be 0.
2107 common->data_size_from_cmnd = 0;
2108 reply = check_command(common, 10, DATA_DIR_NONE,
2109 (1<<1) | (0xf<<2) | (3<<7), 1,
2112 reply = do_verify(common);
/* WRITE(6): transfer length of 0 means 256 blocks per SCSI-2 */
2116 i = common->cmnd[4];
2117 common->data_size_from_cmnd = (i == 0) ? 256 : i;
2118 reply = check_command_size_in_blocks(common, 6,
2123 reply = do_write(common);
2127 common->data_size_from_cmnd =
2128 get_unaligned_be16(&common->cmnd[7]);
2129 reply = check_command_size_in_blocks(common, 10,
2131 (1<<1) | (0xf<<2) | (3<<7), 1,
2134 reply = do_write(common);
2138 common->data_size_from_cmnd =
2139 get_unaligned_be32(&common->cmnd[6]);
2140 reply = check_command_size_in_blocks(common, 12,
2142 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2145 reply = do_write(common);
2149 * Some mandatory commands that we recognize but don't implement.
2150 * They don't mean much in this setting. It's left as an exercise
2151 * for anyone interested to implement RESERVE and RELEASE in terms
2157 case SEND_DIAGNOSTIC:
/* Unknown opcode: validate with an all-ones mask, then fail */
2162 common->data_size_from_cmnd = 0;
2163 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2164 reply = check_command(common, common->cmnd_size,
2165 DATA_DIR_UNKNOWN, ~0, 0, unknown);
2167 common->curlun->sense_data = SS_INVALID_COMMAND;
2172 up_read(&common->filesem);
2174 if (reply == -EINTR || signal_pending(current))
2177 /* Set up the single reply buffer for finish_reply() */
2178 if (reply == -EINVAL)
2179 reply = 0; /* Error reply length */
2180 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2181 reply = min((u32)reply, common->data_size_from_cmnd);
2182 bh->inreq->length = reply;
2183 bh->state = BUF_STATE_FULL;
2184 common->residue -= reply;
2185 } /* Otherwise it's already set */
2191 /*-------------------------------------------------------------------------*/
/*
 * received_cbw -- validate a received Command Block Wrapper and latch the
 * command, LUN, tag, data size and direction into common for execution.
 * An invalid CBW wedges bulk-in and ignores bulk-out until reset, per the
 * Bulk-Only Transport spec; a merely non-meaningful CBW may stall instead.
 * NOTE(review): elided lines (returns, else markers) not visible; verbatim.
 */
2193 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2195 struct usb_request *req = bh->outreq;
2196 struct bulk_cb_wrap *cbw = req->buf;
2197 struct fsg_common *common = fsg->common;
2199 /* Was this a real packet? Should it be ignored? */
2200 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2203 /* Is the CBW valid? */
2204 if (req->actual != US_BULK_CB_WRAP_LEN ||
2205 cbw->Signature != cpu_to_le32(
2207 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2209 le32_to_cpu(cbw->Signature));
2212 * The Bulk-only spec says we MUST stall the IN endpoint
2213 * (6.6.1), so it's unavoidable. It also says we must
2214 * retain this state until the next reset, but there's
2215 * no way to tell the controller driver it should ignore
2216 * Clear-Feature(HALT) requests.
2218 * We aren't required to halt the OUT endpoint; instead
2219 * we can simply accept and discard any data received
2220 * until the next reset.
2222 wedge_bulk_in_endpoint(fsg);
2223 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2227 /* Is the CBW meaningful? */
2228 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
2229 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2230 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2232 cbw->Lun, cbw->Flags, cbw->Length);
2235 * We can do anything we want here, so let's stall the
2236 * bulk pipes if we are allowed to.
2238 if (common->can_stall) {
2239 fsg_set_halt(fsg, fsg->bulk_out);
2240 halt_bulk_in_endpoint(fsg);
2245 /* Save the command for later */
2246 common->cmnd_size = cbw->Length;
2247 memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
2248 if (cbw->Flags & US_BULK_FLAG_IN)
2249 common->data_dir = DATA_DIR_TO_HOST;
2251 common->data_dir = DATA_DIR_FROM_HOST;
2252 common->data_size = le32_to_cpu(cbw->DataTransferLength);
2253 if (common->data_size == 0)
2254 common->data_dir = DATA_DIR_NONE;
2255 common->lun = cbw->Lun;
2256 if (common->lun < common->nluns)
2257 common->curlun = &common->luns[common->lun];
2259 common->curlun = NULL;
2260 common->tag = cbw->Tag;
/*
 * get_next_command -- queue an OUT request for the next CBW, wait for it to
 * arrive, then parse it via received_cbw(). The buffer is drained in
 * software so next_buffhd_to_fill is not advanced.
 * NOTE(review): elided lines (rc checks in the wait loops, returns) are not
 * visible; code kept verbatim.
 */
2264 static int get_next_command(struct fsg_common *common)
2266 struct fsg_buffhd *bh;
2269 /* Wait for the next buffer to become available */
2270 bh = common->next_buffhd_to_fill;
2271 while (bh->state != BUF_STATE_EMPTY) {
2272 rc = sleep_thread(common);
2277 /* Queue a request to read a Bulk-only CBW */
2278 set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
2279 if (!start_out_transfer(common, bh))
2280 /* Don't know what to do if common->fsg is NULL */
2284 * We will drain the buffer in software, which means we
2285 * can reuse it for the next filling. No need to advance
2286 * next_buffhd_to_fill.
2289 /* Wait for the CBW to arrive */
2290 while (bh->state != BUF_STATE_FULL) {
2291 rc = sleep_thread(common);
2296 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
2297 bh->state = BUF_STATE_EMPTY;
2303 /*-------------------------------------------------------------------------*/
/*
 * alloc_request -- allocate a usb_request for the given endpoint into *preq,
 * logging an error on failure.
 * NOTE(review): the return lines are elided from this listing.
 */
2305 static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2306 struct usb_request **preq)
2308 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2311 ERROR(common, "can't allocate request for %s\n", ep->name);
2315 /* Reset interface setting and re-init endpoint state (toggle etc). */
/*
 * Tears down the old fsg's requests and endpoints, then (for a non-NULL
 * new_fsg) configures and enables bulk-in/out for the current speed,
 * allocates one in/out request pair per buffer, and raises a RESET
 * unit-attention on every LUN.
 * NOTE(review): elided lines (NULL checks before frees, goto reset/error
 * labels, returns) are not visible; code kept verbatim.
 */
2316 static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
2318 struct fsg_dev *fsg;
2321 if (common->running)
2322 DBG(common, "reset interface\n");
2325 /* Deallocate the requests */
2329 for (i = 0; i < fsg_num_buffers; ++i) {
2330 struct fsg_buffhd *bh = &common->buffhds[i];
2333 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2337 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2342 /* Disable the endpoints */
2343 if (fsg->bulk_in_enabled) {
2344 usb_ep_disable(fsg->bulk_in);
2345 fsg->bulk_in_enabled = 0;
2347 if (fsg->bulk_out_enabled) {
2348 usb_ep_disable(fsg->bulk_out);
2349 fsg->bulk_out_enabled = 0;
2353 wake_up(&common->fsg_wait);
2356 common->running = 0;
/* Vendor debug: flag a new_fsg that belongs to a different common */
2360 if(new_fsg->common != common){
2361 printk("%s new_fsg->common = 0x%x common = 0x%x\n",__func__,new_fsg->common,common);
2364 common->fsg = new_fsg;
2367 /* Enable the endpoints */
2368 rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
2371 rc = usb_ep_enable(fsg->bulk_in);
2374 fsg->bulk_in->driver_data = common;
2375 fsg->bulk_in_enabled = 1;
2377 rc = config_ep_by_speed(common->gadget, &(fsg->function),
2381 rc = usb_ep_enable(fsg->bulk_out);
2384 fsg->bulk_out->driver_data = common;
2385 fsg->bulk_out_enabled = 1;
2386 common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
2387 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2389 /* Allocate the requests */
2390 for (i = 0; i < fsg_num_buffers; ++i) {
2391 struct fsg_buffhd *bh = &common->buffhds[i];
2393 rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2396 rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2399 bh->inreq->buf = bh->outreq->buf = bh->buf;
2400 bh->inreq->context = bh->outreq->context = bh;
2401 bh->inreq->complete = bulk_in_complete;
2402 bh->outreq->complete = bulk_out_complete;
2405 common->running = 1;
/* Host must re-discover the medium after an interface reset */
2406 for (i = 0; i < common->nluns; ++i)
2407 common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2412 /****************************** ALT CONFIGS ******************************/
/*
 * fsg_set_alt -- composite-framework callback for SET_INTERFACE: records
 * this fsg as the pending instance and defers the actual reconfiguration
 * to the worker thread via a CONFIG_CHANGE exception, returning
 * USB_GADGET_DELAYED_STATUS so the status stage waits for it.
 */
2414 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2416 struct fsg_dev *fsg = fsg_from_func(f);
2418 fsg->common->new_fsg = fsg;
2419 printk("%s f=%p,fsg=%p\n",__func__,f,fsg);
2420 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2421 return USB_GADGET_DELAYED_STATUS;
/*
 * fsg_disable -- composite-framework callback for interface disable:
 * clears the pending fsg and lets the worker thread tear down endpoints
 * via a CONFIG_CHANGE exception (new_fsg == NULL means "shut down").
 */
2424 static void fsg_disable(struct usb_function *f)
2426 struct fsg_dev *fsg = fsg_from_func(f);
2427 printk("%s f=%p\n",__func__,f);
2428 fsg->common->new_fsg = NULL;
2429 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2433 /*-------------------------------------------------------------------------*/
/*
 * Central exception handler, run only in the context of the main
 * (file-storage) kernel thread.  It quiesces all outstanding USB
 * transfers, resets the buffer/SCSI state under common->lock, and then
 * performs the action specific to the exception that was raised
 * (abort, reset, config change, exit, ...).
 *
 * FIX: "¤t->blocked" was mojibake for "&current->blocked" — the
 * "&curren" prefix of the address-of expression had been turned into
 * the HTML entity for U+00A4.  Restored the correct argument so the
 * thread's blocked-signal mask is passed to dequeue_signal_lock().
 */
2435 static void handle_exception(struct fsg_common *common)
2439 struct fsg_buffhd *bh;
2440 enum fsg_state old_state;
2441 struct fsg_lun *curlun;
2442 unsigned int exception_req_tag;
2445 * Clear the existing signals. Anything but SIGUSR1 is converted
2446 * into a high-priority EXIT exception.
2450 dequeue_signal_lock(current, &current->blocked, &info);
2453 if (sig != SIGUSR1) {
2454 if (common->state < FSG_STATE_EXIT)
2455 DBG(common, "Main thread exiting on signal\n");
2456 raise_exception(common, FSG_STATE_EXIT);
2460 /* Cancel all the pending transfers */
2461 if (likely(common->fsg)) {
2462 for (i = 0; i < fsg_num_buffers; ++i) {
2463 bh = &common->buffhds[i];
2465 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
2466 if (bh->outreq_busy)
2467 usb_ep_dequeue(common->fsg->bulk_out,
2471 /* Wait until everything is idle */
2474 for (i = 0; i < fsg_num_buffers; ++i) {
2475 bh = &common->buffhds[i];
2476 num_active += bh->inreq_busy + bh->outreq_busy;
2478 if (num_active == 0)
2480 if (sleep_thread(common))
2484 /* Clear out the controller's fifos */
2485 if (common->fsg->bulk_in_enabled)
2486 usb_ep_fifo_flush(common->fsg->bulk_in);
2487 if (common->fsg->bulk_out_enabled)
2488 usb_ep_fifo_flush(common->fsg->bulk_out);
2492 * Reset the I/O buffer states and pointers, the SCSI
2493 * state, and the exception. Then invoke the handler.
2495 spin_lock_irq(&common->lock);
2497 for (i = 0; i < fsg_num_buffers; ++i) {
2498 bh = &common->buffhds[i];
2499 bh->state = BUF_STATE_EMPTY;
2501 common->next_buffhd_to_fill = &common->buffhds[0];
2502 common->next_buffhd_to_drain = &common->buffhds[0];
2503 exception_req_tag = common->exception_req_tag;
2504 old_state = common->state;
2506 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2507 common->state = FSG_STATE_STATUS_PHASE;
/* Reset per-LUN SCSI sense state while still holding the lock. */
2509 for (i = 0; i < common->nluns; ++i) {
2510 curlun = &common->luns[i];
2511 curlun->prevent_medium_removal = 0;
2512 curlun->sense_data = SS_NO_SENSE;
2513 curlun->unit_attention_data = SS_NO_SENSE;
2514 curlun->sense_data_info = 0;
2515 curlun->info_valid = 0;
2517 common->state = FSG_STATE_IDLE;
2519 spin_unlock_irq(&common->lock);
2521 /* Carry out any extra actions required for the exception */
2522 switch (old_state) {
2523 case FSG_STATE_ABORT_BULK_OUT:
2524 send_status(common);
2525 spin_lock_irq(&common->lock);
2526 if (common->state == FSG_STATE_STATUS_PHASE)
2527 common->state = FSG_STATE_IDLE;
2528 spin_unlock_irq(&common->lock);
2531 case FSG_STATE_RESET:
2533 * In case we were forced against our will to halt a
2534 * bulk endpoint, clear the halt now. (The SuperH UDC
2537 if (!fsg_is_set(common))
2539 if (test_and_clear_bit(IGNORE_BULK_OUT,
2540 &common->fsg->atomic_bitflags))
2541 usb_ep_clear_halt(common->fsg->bulk_in);
2543 if (common->ep0_req_tag == exception_req_tag)
2544 ep0_queue(common); /* Complete the status stage */
2547 * Technically this should go here, but it would only be
2548 * a waste of time. Ditto for the INTERFACE_CHANGE and
2549 * CONFIG_CHANGE cases.
2551 /* for (i = 0; i < common->nluns; ++i) */
2552 /* common->luns[i].unit_attention_data = */
2553 /* SS_RESET_OCCURRED; */
2556 case FSG_STATE_CONFIG_CHANGE:
2557 do_set_interface(common, common->new_fsg);
/* new_fsg != NULL means set_alt() returned DELAYED_STATUS; finish it. */
2558 if (common->new_fsg)
2559 usb_composite_setup_continue(common->cdev);
2562 case FSG_STATE_EXIT:
2563 case FSG_STATE_TERMINATED:
2564 do_set_interface(common, NULL); /* Free resources */
2565 spin_lock_irq(&common->lock);
2566 common->state = FSG_STATE_TERMINATED; /* Stop the thread */
2567 spin_unlock_irq(&common->lock);
/* The remaining states need no extra action. */
2570 case FSG_STATE_INTERFACE_CHANGE:
2571 case FSG_STATE_DISCONNECT:
2572 case FSG_STATE_COMMAND_PHASE:
2573 case FSG_STATE_DATA_PHASE:
2574 case FSG_STATE_STATUS_PHASE:
2575 case FSG_STATE_IDLE:
2581 /*-------------------------------------------------------------------------*/
/*
 * Main loop of the "file-storage" kernel thread created by
 * fsg_common_init().  Repeats the SCSI command cycle:
 * get_next_command() -> do_scsi_command()/finish_reply() -> send_status(),
 * advancing common->state through DATA/STATUS/IDLE phases, until an
 * exception or a signal moves the state to FSG_STATE_TERMINATED.
 */
2583 static int fsg_main_thread(void *common_)
2585 struct fsg_common *common = common_;
2588 * Allow the thread to be killed by a signal, but set the signal mask
2589 * to block everything but INT, TERM, KILL, and USR1.
2591 allow_signal(SIGINT);
2592 allow_signal(SIGTERM);
2593 allow_signal(SIGKILL);
2594 allow_signal(SIGUSR1);
2596 /* Allow the thread to be frozen */
2600 * Arrange for userspace references to be interpreted as kernel
2601 * pointers. That way we can pass a kernel pointer to a routine
2602 * that expects a __user pointer and it will work okay.
2607 while (common->state != FSG_STATE_TERMINATED) {
/* Exceptions and pending signals take priority over normal work. */
2608 if (exception_in_progress(common) || signal_pending(current)) {
2609 handle_exception(common);
2613 if (!common->running) {
2614 sleep_thread(common);
2618 if (get_next_command(common))
/* Phase transitions are skipped if an exception was raised meanwhile. */
2621 spin_lock_irq(&common->lock);
2622 if (!exception_in_progress(common))
2623 common->state = FSG_STATE_DATA_PHASE;
2624 spin_unlock_irq(&common->lock);
2626 if (do_scsi_command(common) || finish_reply(common))
2629 spin_lock_irq(&common->lock);
2630 if (!exception_in_progress(common))
2631 common->state = FSG_STATE_STATUS_PHASE;
2632 spin_unlock_irq(&common->lock);
2634 if (send_status(common))
2637 spin_lock_irq(&common->lock);
2638 if (!exception_in_progress(common))
2639 common->state = FSG_STATE_IDLE;
2640 spin_unlock_irq(&common->lock);
/* Thread is exiting: detach ourselves from the common structure. */
2643 spin_lock_irq(&common->lock);
2644 common->thread_task = NULL;
2645 spin_unlock_irq(&common->lock);
/* If no thread_exits hook handles cleanup, close all open LUN files. */
2647 if (!common->ops || !common->ops->thread_exits
2648 || common->ops->thread_exits(common) < 0) {
2649 struct fsg_lun *curlun = common->luns;
2650 unsigned i = common->nluns;
2652 down_write(&common->filesem);
2653 for (; i--; ++curlun) {
2654 if (!fsg_lun_is_open(curlun))
2657 fsg_lun_close(curlun);
2658 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2660 up_write(&common->filesem);
2663 /* Let fsg_unbind() know the thread has exited */
2664 complete_and_exit(&common->thread_notifier, 0);
2668 /*************************** DEVICE ATTRIBUTES ***************************/
/*
 * Per-LUN sysfs attributes.  The 0644 variants are read/write; the
 * __ATTR 0444 variants are read-only replacements used when the
 * attribute must not be changed at runtime (ro for CD-ROM LUNs,
 * file for non-removable LUNs) — see fsg_common_init()/release().
 */
2670 static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
2671 static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, fsg_store_nofua);
2672 static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
2673 static DEVICE_ATTR(cdrom, 0644, fsg_show_cdrom, fsg_store_cdrom);
2675 static struct device_attribute dev_attr_ro_cdrom =
2676 __ATTR(ro, 0444, fsg_show_ro, NULL);
2677 static struct device_attribute dev_attr_file_nonremovable =
2678 __ATTR(file, 0444, fsg_show_file, NULL);
2681 /****************************** FSG COMMON ******************************/
/* Forward declaration: fsg_common_init() error paths call this. */
2683 static void fsg_common_release(struct kref *ref);
/*
 * device.release callback for per-LUN devices.  The LUN array is
 * freed as a whole by fsg_common_release(), so nothing to do here.
 */
2685 static void fsg_lun_release(struct device *dev)
2687 /* Nothing needs to be done */
/* Take a reference on the shared fsg_common structure. */
2690 static inline void fsg_common_get(struct fsg_common *common)
2692 kref_get(&common->ref);
/* Drop a reference; fsg_common_release() runs when it hits zero. */
2695 static inline void fsg_common_put(struct fsg_common *common)
2697 kref_put(&common->ref, fsg_common_release);
/*
 * Allocate and initialize the shared fsg_common structure from @cfg:
 * buffer heads, LUN devices (with sysfs attributes and backing files),
 * the INQUIRY string, and the worker thread.  If @common is NULL a new
 * structure is kzalloc'd and free_storage_on_release is set so that
 * fsg_common_release() frees it; a caller-supplied structure is only
 * zeroed.  Returns the initialized structure or an ERR_PTR() — error
 * paths funnel into fsg_common_release() for unified cleanup.
 */
2700 static struct fsg_common *fsg_common_init(struct fsg_common *common,
2701 struct usb_composite_dev *cdev,
2702 struct fsg_config *cfg)
2704 struct usb_gadget *gadget = cdev->gadget;
2705 struct fsg_buffhd *bh;
2706 struct fsg_lun *curlun;
2707 struct fsg_lun_config *lcfg;
2711 rc = fsg_num_buffers_validate();
2715 /* Find out how many LUNs there should be */
2717 if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2718 dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
2719 return ERR_PTR(-EINVAL);
/* Caller passed no storage: allocate our own and remember to free it. */
2724 common = kzalloc(sizeof *common, GFP_KERNEL);
2726 return ERR_PTR(-ENOMEM);
2727 common->free_storage_on_release = 1;
2729 memset(common, 0, sizeof *common);
2730 common->free_storage_on_release = 0;
2733 common->buffhds = kcalloc(fsg_num_buffers,
2734 sizeof *(common->buffhds), GFP_KERNEL);
2735 if (!common->buffhds) {
2736 if (common->free_storage_on_release)
2738 return ERR_PTR(-ENOMEM);
2741 common->ops = cfg->ops;
2742 common->private_data = cfg->private_data;
2744 common->gadget = gadget;
2745 common->ep0 = gadget->ep0;
2746 common->ep0req = cdev->req;
2747 common->cdev = cdev;
/* Vendor quirk: the SPRD DWC controller is forced to allow stalls. */
2748 #ifdef CONFIG_USB_SPRD_DWC
2749 common->can_stall = 1;
2751 /* Maybe allocate device-global string IDs, and patch descriptors */
2752 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2753 rc = usb_string_id(cdev);
2754 if (unlikely(rc < 0))
2756 fsg_strings[FSG_STRING_INTERFACE].id = rc;
2757 fsg_intf_desc.iInterface = rc;
2761 * Create the LUNs, open their backing files, and register the
2762 * LUN devices in sysfs.
2764 curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
2765 if (unlikely(!curlun)) {
2769 common->luns = curlun;
2771 init_rwsem(&common->filesem);
2773 for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
2774 curlun->cdrom = !!lcfg->cdrom;
/* CD-ROM LUNs are forcibly read-only. */
2775 curlun->ro = lcfg->cdrom || lcfg->ro;
2776 curlun->initially_ro = curlun->ro;
2777 curlun->removable = lcfg->removable;
2778 curlun->nofua = lcfg->nofua;
2779 curlun->dev.release = fsg_lun_release;
2780 curlun->dev.parent = &gadget->dev;
2781 /* curlun->dev.driver = &fsg_driver.driver; XXX */
2782 dev_set_drvdata(&curlun->dev, &common->filesem);
2783 dev_set_name(&curlun->dev, "lun%d", i);
2785 rc = device_register(&curlun->dev);
2787 INFO(common, "failed to register LUN%d: %d\n", i, rc);
2789 put_device(&curlun->dev);
/* Pick read-only attribute variants for cdrom/non-removable LUNs. */
2793 rc = device_create_file(&curlun->dev,
2795 ? &dev_attr_ro_cdrom
2799 rc = device_create_file(&curlun->dev,
2802 : &dev_attr_file_nonremovable);
2805 rc = device_create_file(&curlun->dev, &dev_attr_nofua);
2808 rc = device_create_file(&curlun->dev, &dev_attr_cdrom);
2812 if (lcfg->filename) {
2813 rc = fsg_lun_open(curlun, lcfg->filename);
2816 } else if (!curlun->removable) {
2817 ERROR(common, "no file given for LUN%d\n", i);
2822 common->nluns = nluns;
2823 common->board_support_luns = nluns;
2825 /* Data buffers cyclic list */
2826 bh = common->buffhds;
2827 i = fsg_num_buffers;
2828 goto buffhds_first_it;
2833 bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
2834 if (unlikely(!bh->buf)) {
/* Close the ring: last buffer head points back to the first. */
2839 bh->next = common->buffhds;
2841 /* Prepare inquiryString */
2842 i = get_default_bcdDevice();
2843 snprintf(common->inquiry_string, sizeof common->inquiry_string,
2844 "%-8s%-16s%04x", cfg->vendor_name ?: "Linux",
2845 /* Assume product name dependent on the first LUN */
2846 cfg->product_name ?: (common->luns->cdrom
2847 ? "File-Stor Gadget"
2848 : "File-CD Gadget"),
2852 * Some peripheral controllers are known not to be able to
2853 * halt bulk endpoints correctly. If one of them is present,
2856 common->can_stall = cfg->can_stall &&
2857 !(gadget_is_at91(common->gadget));
2859 spin_lock_init(&common->lock);
2860 kref_init(&common->ref);
2862 /* Tell the thread to start working */
2863 common->thread_task =
2864 kthread_create(fsg_main_thread, common, "file-storage");
2865 if (IS_ERR(common->thread_task)) {
2866 rc = PTR_ERR(common->thread_task);
/* NOTE(review): the completion/waitqueue are initialized after
 * kthread_create(); this is safe only because the thread is not
 * started until wake_up_process() below — confirm ordering if the
 * thread could ever run earlier. */
2869 init_completion(&common->thread_notifier);
2870 init_waitqueue_head(&common->fsg_wait);
2872 INIT_WORK(&common->fsync_work, &do_fsync);
2875 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2876 INFO(common, "Number of LUNs=%d\n", common->nluns);
/* Log each LUN's properties and backing-file path (best effort). */
2878 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2879 for (i = 0, nluns = common->nluns, curlun = common->luns;
2882 char *p = "(no medium)";
2883 if (fsg_lun_is_open(curlun)) {
2886 p = d_path(&curlun->filp->f_path,
2892 LINFO(curlun, "LUN: %s%s%sfile: %s\n",
2893 curlun->removable ? "removable " : "",
2894 curlun->ro ? "read only " : "",
2895 curlun->cdrom ? "CD-ROM " : "",
2900 DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
2902 wake_up_process(common->thread_task);
/* Error path: only the LUNs registered so far need unwinding. */
2907 common->nluns = i + 1;
2909 common->state = FSG_STATE_TERMINATED; /* The thread is dead */
2910 /* Call fsg_common_release() directly, ref might be not initialised. */
2911 fsg_common_release(&common->ref);
/*
 * kref release callback: tear down everything fsg_common_init() built.
 * Stops the worker thread (if still running), unregisters the LUN
 * devices and their sysfs files, frees buffers, and finally frees the
 * structure itself when we allocated it (free_storage_on_release).
 * Also used directly on fsg_common_init() error paths.
 */
2915 static void fsg_common_release(struct kref *ref)
2917 struct fsg_common *common = container_of(ref, struct fsg_common, ref);
2919 /* If the thread isn't already dead, tell it to exit now */
2920 if (common->state != FSG_STATE_TERMINATED) {
2921 raise_exception(common, FSG_STATE_EXIT);
2922 wait_for_completion(&common->thread_notifier);
2925 if (likely(common->luns)) {
2926 struct fsg_lun *lun = common->luns;
2927 unsigned i = common->nluns;
2929 /* In error recovery common->nluns may be zero. */
2930 for (; i; --i, ++lun) {
/* Mirror of the device_create_file() calls in fsg_common_init(). */
2931 device_remove_file(&lun->dev, &dev_attr_nofua);
2932 device_remove_file(&lun->dev,
2934 ? &dev_attr_ro_cdrom
2936 device_remove_file(&lun->dev,
2939 : &dev_attr_file_nonremovable);
2940 device_remove_file(&lun->dev, &dev_attr_cdrom);
2942 device_unregister(&lun->dev);
2945 kfree(common->luns);
/* Free every buffer in the cyclic buffhd list. */
2949 struct fsg_buffhd *bh = common->buffhds;
2950 unsigned i = fsg_num_buffers;
2953 } while (++bh, --i);
2956 kfree(common->buffhds);
2957 if (common->free_storage_on_release)
2962 /*-------------------------------------------------------------------------*/
/*
 * unbind() callback: detach this fsg_dev from the configuration.
 * If it is the currently active one, ask the worker thread to drop it
 * (CONFIG_CHANGE with new_fsg == NULL) and wait until common->fsg no
 * longer points at us before releasing our reference and descriptors.
 */
2964 static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2966 struct fsg_dev *fsg = fsg_from_func(f);
2967 struct fsg_common *common = fsg->common;
2969 DBG(fsg, "unbind\n");
2970 if (fsg->common->fsg == fsg) {
2971 fsg->common->new_fsg = NULL;
2972 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2973 /* FIXME: make interruptible or killable somehow? */
2974 wait_event(common->fsg_wait, common->fsg != fsg);
2977 fsg_common_put(common);
2978 usb_free_all_descriptors(&fsg->function);
/*
 * bind() callback: allocate an interface number, autoconfigure the two
 * bulk endpoints against the gadget's FS descriptors, copy the chosen
 * endpoint addresses into the HS/SS descriptors, and hand the complete
 * descriptor sets to the composite core.
 */
2982 static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2984 struct fsg_dev *fsg = fsg_from_func(f);
2985 struct usb_gadget *gadget = c->cdev->gadget;
2991 fsg->gadget = gadget;
2994 i = usb_interface_id(c, f);
2997 fsg_intf_desc.bInterfaceNumber = i;
2998 fsg->interface_number = i;
3000 /* Find all the endpoints we will use */
3001 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
3004 ep->driver_data = fsg->common; /* claim the endpoint */
3007 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
3010 ep->driver_data = fsg->common; /* claim the endpoint */
3013 /* Assume endpoint addresses are the same for both speeds */
3014 fsg_hs_bulk_in_desc.bEndpointAddress =
3015 fsg_fs_bulk_in_desc.bEndpointAddress;
3016 fsg_hs_bulk_out_desc.bEndpointAddress =
3017 fsg_fs_bulk_out_desc.bEndpointAddress;
3019 /* Calculate bMaxBurst, we know packet size is 1024 */
3020 max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
3022 fsg_ss_bulk_in_desc.bEndpointAddress =
3023 fsg_fs_bulk_in_desc.bEndpointAddress;
3024 fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
3026 fsg_ss_bulk_out_desc.bEndpointAddress =
3027 fsg_fs_bulk_out_desc.bEndpointAddress;
3028 fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
3030 ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
3038 ERROR(fsg, "unable to autoconfigure all endpoints\n");
3042 /****************************** ADD FUNCTION ******************************/
3044 static struct usb_gadget_strings *fsg_strings_array[] = {
/*
 * Allocate a struct fsg_dev, wire up its usb_function callbacks, and
 * add it to configuration @c.  On success takes a reference on
 * @common (the caller's reference covers the window until then).
 */
3049 static int fsg_bind_config(struct usb_composite_dev *cdev,
3050 struct usb_configuration *c,
3051 struct fsg_common *common)
3053 struct fsg_dev *fsg;
3056 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
3060 fsg->function.name = FSG_DRIVER_DESC;
3061 fsg->function.strings = fsg_strings_array;
3062 fsg->function.bind = fsg_bind;
3063 fsg->function.unbind = fsg_unbind;
3064 fsg->function.setup = fsg_setup;
3065 fsg->function.set_alt = fsg_set_alt;
3066 fsg->function.disable = fsg_disable;
3068 fsg->common = common;
3070 * Our caller holds a reference to common structure so we
3071 * don't have to be worry about it being freed until we return
3072 * from this function. So instead of incrementing counter now
3073 * and decrement in error recovery we increment it only when
3074 * call to usb_add_function() was successful.
3077 rc = usb_add_function(c, &fsg->function);
3081 fsg_common_get(fsg->common);
3086 /************************* Module parameters *************************/
/*
 * Values collected from module parameters; converted into a
 * struct fsg_config by fsg_config_from_params().  The *_count fields
 * record how many array entries the user actually supplied.
 */
3088 struct fsg_module_parameters {
3089 char *file[FSG_MAX_LUNS];
3090 bool ro[FSG_MAX_LUNS];
3091 bool removable[FSG_MAX_LUNS];
3092 bool cdrom[FSG_MAX_LUNS];
3093 bool nofua[FSG_MAX_LUNS];
3095 unsigned int file_count, ro_count, removable_count, cdrom_count;
3096 unsigned int nofua_count;
3097 unsigned int luns; /* nluns */
3098 bool stall; /* can_stall */
/* Declare one array-valued module parameter named <prefix><name>,
 * recording the supplied element count in <prefix>params.<name>_count. */
3101 #define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
3102 module_param_array_named(prefix ## name, params.name, type, \
3103 &prefix ## params.name ## _count, \
3105 MODULE_PARM_DESC(prefix ## name, desc)
/* Declare one scalar module parameter named <prefix><name>. */
3107 #define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \
3108 module_param_named(prefix ## name, params.name, type, \
3110 MODULE_PARM_DESC(prefix ## name, desc)
/* Declare the full set of mass-storage module parameters for a given
 * prefix and struct fsg_module_parameters instance. */
3112 #define FSG_MODULE_PARAMETERS(prefix, params) \
3113 _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \
3114 "names of backing files or devices"); \
3115 _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \
3116 "true to force read-only"); \
3117 _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \
3118 "true to simulate removable media"); \
3119 _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
3120 "true to simulate CD-ROM instead of disk"); \
3121 _FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool, \
3122 "true to ignore SCSI WRITE(10,12) FUA bit"); \
3123 _FSG_MODULE_PARAM(prefix, params, luns, uint, \
3124 "number of LUNs"); \
3125 _FSG_MODULE_PARAM(prefix, params, stall, bool, \
3126 "false to prevent bulk stalls")
/*
 * Translate module parameters into a struct fsg_config.  The LUN count
 * defaults to the number of files given (or 1), clamped to
 * FSG_MAX_LUNS; per-LUN flags come from the parameter arrays.
 */
3129 fsg_config_from_params(struct fsg_config *cfg,
3130 const struct fsg_module_parameters *params)
3132 struct fsg_lun_config *lun;
3135 /* Configure LUNs */
3137 min(params->luns ?: (params->file_count ?: 1u),
3138 (unsigned)FSG_MAX_LUNS);
3139 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
3140 lun->ro = !!params->ro[i];
3141 lun->cdrom = !!params->cdrom[i];
3142 lun->removable = !!params->removable[i];
/* Only use file[i] if the user supplied a non-empty string. */
3144 params->file_count > i && params->file[i][0]
3149 /* Let MSF use defaults */
/* NOTE(review): 0 used as a null pointer constant — NULL is the
 * kernel's preferred spelling; behavior is identical. */
3150 cfg->vendor_name = 0;
3151 cfg->product_name = 0;
3154 cfg->private_data = NULL;
3157 cfg->can_stall = params->stall;
3160 static inline struct fsg_common *
3161 fsg_common_from_params(struct fsg_common *common,
3162 struct usb_composite_dev *cdev,
3163 const struct fsg_module_parameters *params)
3164 __attribute__((unused));
3165 static inline struct fsg_common *
3166 fsg_common_from_params(struct fsg_common *common,
3167 struct usb_composite_dev *cdev,
3168 const struct fsg_module_parameters *params)
3170 struct fsg_config cfg;
3171 fsg_config_from_params(&cfg, params);
3172 return fsg_common_init(common, cdev, &cfg);