2 * drivers/usb/gadget/f_mass_storage.c
4 * Function Driver for USB Mass Storage
6 * Copyright (C) 2008 Google, Inc.
7 * Author: Mike Lockwood <lockwood@android.com>
9 * Based heavily on the file_storage gadget driver in
10 * drivers/usb/gadget/file_storage.c and licensed under the same terms:
12 * Copyright (C) 2003-2007 Alan Stern
13 * All rights reserved.
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions, and the following disclaimer,
20 * without modification.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. The names of the above-listed copyright holders may not be used
25 * to endorse or promote products derived from this software without
26 * specific prior written permission.
28 * ALTERNATIVELY, this software may be distributed under the terms of the
29 * GNU General Public License ("GPL") as published by the Free Software
30 * Foundation, either version 2 of that License or (at your option) any
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
34 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
35 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
36 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
37 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
38 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
39 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
40 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
41 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
42 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
43 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 /* #define VERBOSE_DEBUG */
48 /* #define DUMP_MSGS */
51 #include <linux/blkdev.h>
52 #include <linux/completion.h>
53 #include <linux/dcache.h>
54 #include <linux/delay.h>
55 #include <linux/device.h>
56 #include <linux/fcntl.h>
57 #include <linux/file.h>
59 #include <linux/kref.h>
60 #include <linux/kthread.h>
61 #include <linux/limits.h>
62 #include <linux/rwsem.h>
63 #include <linux/slab.h>
64 #include <linux/spinlock.h>
65 #include <linux/string.h>
66 #include <linux/switch.h>
67 #include <linux/freezer.h>
68 #include <linux/utsname.h>
69 #include <linux/wakelock.h>
70 #include <linux/platform_device.h>
72 #include <linux/usb_usual.h>
73 #include <linux/usb/ch9.h>
74 #include <linux/usb/composite.h>
75 #include <linux/usb/gadget.h>
76 #include <linux/usb/android_composite.h>
78 #include "gadget_chips.h"
81 #define BULK_BUFFER_SIZE 4096
83 /*-------------------------------------------------------------------------*/
85 #define DRIVER_NAME "usb_mass_storage"
88 static const char shortname[] = DRIVER_NAME;
91 #define LDBG(lun, fmt, args...) \
92 dev_dbg(&(lun)->dev , fmt , ## args)
93 #define MDBG(fmt,args...) \
94 printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
96 #define LDBG(lun, fmt, args...) \
98 #define MDBG(fmt,args...) \
107 #define VLDBG(lun, fmt, args...) \
109 #endif /* VERBOSE_DEBUG */
111 #define LERROR(lun, fmt, args...) \
112 dev_err(&(lun)->dev , fmt , ## args)
113 #define LWARN(lun, fmt, args...) \
114 dev_warn(&(lun)->dev , fmt , ## args)
115 #define LINFO(lun, fmt, args...) \
116 dev_info(&(lun)->dev , fmt , ## args)
118 #define MINFO(fmt,args...) \
119 printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
126 #define DBG(d, fmt, args...) \
127 dev_dbg(&(d)->cdev->gadget->dev , fmt , ## args)
128 #define VDBG(d, fmt, args...) \
129 dev_vdbg(&(d)->cdev->gadget->dev , fmt , ## args)
130 #define ERROR(d, fmt, args...) \
131 dev_err(&(d)->cdev->gadget->dev , fmt , ## args)
132 #define WARNING(d, fmt, args...) \
133 dev_warn(&(d)->cdev->gadget->dev , fmt , ## args)
134 #define INFO(d, fmt, args...) \
135 dev_info(&(d)->cdev->gadget->dev , fmt , ## args)
138 /*-------------------------------------------------------------------------*/
140 /* Bulk-only data structures */
142 /* Command Block Wrapper */
143 struct bulk_cb_wrap {
144 __le32 Signature; /* Contains 'USBC' */
145 u32 Tag; /* Unique per command id */
146 __le32 DataTransferLength; /* Size of the data */
147 u8 Flags; /* Direction in bit 7 */
148 u8 Lun; /* LUN (normally 0) */
149 u8 Length; /* Of the CDB, <= MAX_COMMAND_SIZE */
150 u8 CDB[16]; /* Command Data Block */
153 #define USB_BULK_CB_WRAP_LEN 31
154 #define USB_BULK_CB_SIG 0x43425355 /* Spells out USBC */
155 #define USB_BULK_IN_FLAG 0x80
157 /* Command Status Wrapper */
158 struct bulk_cs_wrap {
159 __le32 Signature; /* Should = 'USBS' */
160 u32 Tag; /* Same as original command */
161 __le32 Residue; /* Amount not transferred */
162 u8 Status; /* See below */
165 #define USB_BULK_CS_WRAP_LEN 13
166 #define USB_BULK_CS_SIG 0x53425355 /* Spells out 'USBS' */
167 #define USB_STATUS_PASS 0
168 #define USB_STATUS_FAIL 1
169 #define USB_STATUS_PHASE_ERROR 2
171 /* Bulk-only class specific requests */
172 #define USB_BULK_RESET_REQUEST 0xff
173 #define USB_BULK_GET_MAX_LUN_REQUEST 0xfe
175 /* Length of a SCSI Command Data Block */
176 #define MAX_COMMAND_SIZE 16
178 /* SCSI commands that we recognize */
179 #define SC_FORMAT_UNIT 0x04
180 #define SC_INQUIRY 0x12
181 #define SC_MODE_SELECT_6 0x15
182 #define SC_MODE_SELECT_10 0x55
183 #define SC_MODE_SENSE_6 0x1a
184 #define SC_MODE_SENSE_10 0x5a
185 #define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
186 #define SC_READ_6 0x08
187 #define SC_READ_10 0x28
188 #define SC_READ_12 0xa8
189 #define SC_READ_CAPACITY 0x25
190 #define SC_READ_FORMAT_CAPACITIES 0x23
191 #define SC_RELEASE 0x17
192 #define SC_REQUEST_SENSE 0x03
193 #define SC_RESERVE 0x16
194 #define SC_SEND_DIAGNOSTIC 0x1d
195 #define SC_START_STOP_UNIT 0x1b
196 #define SC_SYNCHRONIZE_CACHE 0x35
197 #define SC_TEST_UNIT_READY 0x00
198 #define SC_VERIFY 0x2f
199 #define SC_WRITE_6 0x0a
200 #define SC_WRITE_10 0x2a
201 #define SC_WRITE_12 0xaa
203 /* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
204 #define SS_NO_SENSE 0
205 #define SS_COMMUNICATION_FAILURE 0x040800
206 #define SS_INVALID_COMMAND 0x052000
207 #define SS_INVALID_FIELD_IN_CDB 0x052400
208 #define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
209 #define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
210 #define SS_MEDIUM_NOT_PRESENT 0x023a00
211 #define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
212 #define SS_NOT_READY_TO_READY_TRANSITION 0x062800
213 #define SS_RESET_OCCURRED 0x062900
214 #define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
215 #define SS_UNRECOVERED_READ_ERROR 0x031100
216 #define SS_WRITE_ERROR 0x030c02
217 #define SS_WRITE_PROTECTED 0x072700
219 #define SK(x) ((u8) ((x) >> 16)) /* Sense Key byte, etc. */
220 #define ASC(x) ((u8) ((x) >> 8))
221 #define ASCQ(x) ((u8) (x))
224 /*-------------------------------------------------------------------------*/
232 unsigned int prevent_medium_removal : 1;
233 unsigned int registered : 1;
234 unsigned int info_valid : 1;
238 u32 unit_attention_data;
243 #define backing_file_is_open(curlun) ((curlun)->filp != NULL)
246 static struct lun *dev_to_lun(struct device *dev)
248 return container_of(dev, struct lun, dev);
251 /* Big enough to hold our biggest descriptor */
252 #define EP0_BUFSIZE 256
254 /* Number of buffers we will use. 2 is enough for double-buffering */
255 #define NUM_BUFFERS 2
257 enum fsg_buffer_state {
265 enum fsg_buffer_state state;
266 struct fsg_buffhd *next;
268 /* The NetChip 2280 is faster, and handles some protocol faults
269 * better, if we don't submit any short bulk-out read requests.
270 * So we will record the intended request length here. */
271 unsigned int bulk_out_intended_length;
273 struct usb_request *inreq;
275 struct usb_request *outreq;
280 /* This one isn't used anywhere */
281 FSG_STATE_COMMAND_PHASE = -10,
283 FSG_STATE_DATA_PHASE,
284 FSG_STATE_STATUS_PHASE,
287 FSG_STATE_ABORT_BULK_OUT,
289 FSG_STATE_CONFIG_CHANGE,
294 enum data_direction {
295 DATA_DIR_UNKNOWN = 0,
302 struct usb_function function;
303 struct usb_composite_dev *cdev;
305 /* optional "usb_mass_storage" platform device */
306 struct platform_device *pdev;
308 /* lock protects: state and all the req_busy's */
311 /* filesem protects: backing files in use */
312 struct rw_semaphore filesem;
314 /* reference counting: wait until all LUNs are released */
317 unsigned int bulk_out_maxpacket;
318 enum fsg_state state; /* For exception handling */
320 u8 config, new_config;
322 unsigned int running : 1;
323 unsigned int bulk_in_enabled : 1;
324 unsigned int bulk_out_enabled : 1;
325 unsigned int phase_error : 1;
326 unsigned int short_packet_received : 1;
327 unsigned int bad_lun_okay : 1;
329 unsigned long atomic_bitflags;
331 #define CLEAR_BULK_HALTS 1
334 struct usb_ep *bulk_in;
335 struct usb_ep *bulk_out;
337 struct fsg_buffhd *next_buffhd_to_fill;
338 struct fsg_buffhd *next_buffhd_to_drain;
339 struct fsg_buffhd buffhds[NUM_BUFFERS];
341 int thread_wakeup_needed;
342 struct completion thread_notifier;
343 struct task_struct *thread_task;
346 u8 cmnd[MAX_COMMAND_SIZE];
347 enum data_direction data_dir;
349 u32 data_size_from_cmnd;
364 struct switch_dev sdev;
366 struct wake_lock wake_lock;
369 static inline struct fsg_dev *func_to_dev(struct usb_function *f)
371 return container_of(f, struct fsg_dev, function);
374 static int exception_in_progress(struct fsg_dev *fsg)
376 return (fsg->state > FSG_STATE_IDLE);
379 /* Make bulk-out requests be divisible by the maxpacket size */
380 static void set_bulk_out_req_length(struct fsg_dev *fsg,
381 struct fsg_buffhd *bh, unsigned int length)
385 bh->bulk_out_intended_length = length;
386 rem = length % fsg->bulk_out_maxpacket;
388 length += fsg->bulk_out_maxpacket - rem;
389 bh->outreq->length = length;
392 static struct fsg_dev *the_fsg;
394 static void close_backing_file(struct fsg_dev *fsg, struct lun *curlun);
395 static void close_all_backing_files(struct fsg_dev *fsg);
398 /*-------------------------------------------------------------------------*/
402 static void dump_msg(struct fsg_dev *fsg, const char *label,
403 const u8 *buf, unsigned int length)
406 DBG(fsg, "%s, length %u:\n", label, length);
407 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
408 16, 1, buf, length, 0);
412 static void dump_cdb(struct fsg_dev *fsg)
417 static void dump_msg(struct fsg_dev *fsg, const char *label,
418 const u8 *buf, unsigned int length)
423 static void dump_cdb(struct fsg_dev *fsg)
425 print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
426 16, 1, fsg->cmnd, fsg->cmnd_size, 0);
431 static void dump_cdb(struct fsg_dev *fsg)
434 #endif /* VERBOSE_DEBUG */
435 #endif /* DUMP_MSGS */
438 /*-------------------------------------------------------------------------*/
440 /* Routines for unaligned data access */
442 static u16 get_be16(u8 *buf)
444 return ((u16) buf[0] << 8) | ((u16) buf[1]);
447 static u32 get_be32(u8 *buf)
449 return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
450 ((u32) buf[2] << 8) | ((u32) buf[3]);
453 static void put_be16(u8 *buf, u16 val)
459 static void put_be32(u8 *buf, u32 val)
467 /*-------------------------------------------------------------------------*/
470 * DESCRIPTORS ... most are static, but strings and (full) configuration
471 * descriptors are built on demand. Also the (static) config and interface
472 * descriptors are adjusted during fsg_bind().
475 /* There is only one interface. */
477 static struct usb_interface_descriptor
479 .bLength = sizeof intf_desc,
480 .bDescriptorType = USB_DT_INTERFACE,
482 .bNumEndpoints = 2, /* Adjusted during fsg_bind() */
483 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
484 .bInterfaceSubClass = US_SC_SCSI,
485 .bInterfaceProtocol = US_PR_BULK,
488 /* Three full-speed endpoint descriptors: bulk-in, bulk-out,
489 * and interrupt-in. */
491 static struct usb_endpoint_descriptor
493 .bLength = USB_DT_ENDPOINT_SIZE,
494 .bDescriptorType = USB_DT_ENDPOINT,
496 .bEndpointAddress = USB_DIR_IN,
497 .bmAttributes = USB_ENDPOINT_XFER_BULK,
498 /* wMaxPacketSize set by autoconfiguration */
501 static struct usb_endpoint_descriptor
503 .bLength = USB_DT_ENDPOINT_SIZE,
504 .bDescriptorType = USB_DT_ENDPOINT,
506 .bEndpointAddress = USB_DIR_OUT,
507 .bmAttributes = USB_ENDPOINT_XFER_BULK,
508 /* wMaxPacketSize set by autoconfiguration */
511 static struct usb_descriptor_header *fs_function[] = {
512 (struct usb_descriptor_header *) &intf_desc,
513 (struct usb_descriptor_header *) &fs_bulk_in_desc,
514 (struct usb_descriptor_header *) &fs_bulk_out_desc,
517 #define FS_FUNCTION_PRE_EP_ENTRIES 2
520 static struct usb_endpoint_descriptor
522 .bLength = USB_DT_ENDPOINT_SIZE,
523 .bDescriptorType = USB_DT_ENDPOINT,
525 /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
526 .bmAttributes = USB_ENDPOINT_XFER_BULK,
527 .wMaxPacketSize = __constant_cpu_to_le16(512),
530 static struct usb_endpoint_descriptor
532 .bLength = USB_DT_ENDPOINT_SIZE,
533 .bDescriptorType = USB_DT_ENDPOINT,
535 /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
536 .bmAttributes = USB_ENDPOINT_XFER_BULK,
537 .wMaxPacketSize = __constant_cpu_to_le16(512),
538 .bInterval = 1, /* NAK every 1 uframe */
542 static struct usb_descriptor_header *hs_function[] = {
543 (struct usb_descriptor_header *) &intf_desc,
544 (struct usb_descriptor_header *) &hs_bulk_in_desc,
545 (struct usb_descriptor_header *) &hs_bulk_out_desc,
549 /* Maxpacket and other transfer characteristics vary by speed. */
550 static struct usb_endpoint_descriptor *
551 ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
552 struct usb_endpoint_descriptor *hs)
554 if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
559 /*-------------------------------------------------------------------------*/
561 /* These routines may be called in process context or in_irq */
563 /* Caller must hold fsg->lock */
564 static void wakeup_thread(struct fsg_dev *fsg)
566 /* Tell the main thread that something has happened */
567 fsg->thread_wakeup_needed = 1;
568 if (fsg->thread_task)
569 wake_up_process(fsg->thread_task);
573 static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
577 DBG(fsg, "raise_exception %d\n", (int)new_state);
578 /* Do nothing if a higher-priority exception is already in progress.
579 * If a lower-or-equal priority exception is in progress, preempt it
580 * and notify the main thread by sending it a signal. */
581 spin_lock_irqsave(&fsg->lock, flags);
582 if (fsg->state <= new_state) {
583 fsg->state = new_state;
584 if (fsg->thread_task)
585 send_sig_info(SIGUSR1, SEND_SIG_FORCED,
588 spin_unlock_irqrestore(&fsg->lock, flags);
592 /*-------------------------------------------------------------------------*/
594 /* Bulk and interrupt endpoint completion handlers.
595 * These always run in_irq. */
597 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
599 struct fsg_dev *fsg = ep->driver_data;
600 struct fsg_buffhd *bh = req->context;
602 if (req->status || req->actual != req->length)
603 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
604 req->status, req->actual, req->length);
606 /* Hold the lock while we update the request and buffer states */
608 spin_lock(&fsg->lock);
610 bh->state = BUF_STATE_EMPTY;
612 spin_unlock(&fsg->lock);
615 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
617 struct fsg_dev *fsg = ep->driver_data;
618 struct fsg_buffhd *bh = req->context;
620 dump_msg(fsg, "bulk-out", req->buf, req->actual);
621 if (req->status || req->actual != bh->bulk_out_intended_length)
622 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
623 req->status, req->actual,
624 bh->bulk_out_intended_length);
626 /* Hold the lock while we update the request and buffer states */
628 spin_lock(&fsg->lock);
630 bh->state = BUF_STATE_FULL;
632 spin_unlock(&fsg->lock);
/*
 * Handle Bulk-Only Transport class-specific control requests on ep0:
 * Bulk-Only Mass Storage Reset (0xff) and Get Max LUN (0xfe).
 * NOTE(review): several lines of this function are missing from this
 * extract (braces, fall-throughs, the error/exit paths around lines
 * 653-690 of the original); the code below is kept byte-identical.
 */
635 static int fsg_function_setup(struct usb_function *f,
636 		const struct usb_ctrlrequest *ctrl)
638 struct fsg_dev *fsg = func_to_dev(f);
639 struct usb_composite_dev *cdev = fsg->cdev;
640 int value = -EOPNOTSUPP;
641 u16 w_index = le16_to_cpu(ctrl->wIndex);
642 u16 w_value = le16_to_cpu(ctrl->wValue);
643 u16 w_length = le16_to_cpu(ctrl->wLength);
645 DBG(fsg, "fsg_function_setup\n");
646 /* Handle Bulk-only class-specific requests */
647 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
648 DBG(fsg, "USB_TYPE_CLASS\n");
649 switch (ctrl->bRequest) {
650 case USB_BULK_RESET_REQUEST:
651 if (ctrl->bRequestType != (USB_DIR_OUT |
652 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
/* Reset must have zero wIndex/wValue per the BOT spec */
654 if (w_index != 0 || w_value != 0) {
659 /* Raise an exception to stop the current operation
660 * and reinitialize our state. */
661 DBG(fsg, "bulk reset request\n");
662 raise_exception(fsg, FSG_STATE_RESET);
666 case USB_BULK_GET_MAX_LUN_REQUEST:
667 if (ctrl->bRequestType != (USB_DIR_IN |
668 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
670 if (w_index != 0 || w_value != 0) {
674 VDBG(fsg, "get max LUN\n");
/* Get Max LUN returns the highest LUN index, i.e. nluns - 1 */
675 *(u8 *)cdev->req->buf = fsg->nluns - 1;
681 /* respond with data transfer or status phase? */
684 cdev->req->zero = value < w_length;
685 cdev->req->length = value;
686 rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
688 printk("%s setup response queue error\n", __func__);
691 if (value == -EOPNOTSUPP)
693 "unknown class-specific control req "
694 "%02x.%02x v%04x i%04x l%u\n",
695 ctrl->bRequestType, ctrl->bRequest,
696 le16_to_cpu(ctrl->wValue), w_index, w_length);
700 /*-------------------------------------------------------------------------*/
702 /* All the following routines run in process context */
705 /* Use this for bulk or interrupt transfers, not ep0 */
/*
 * Queue a bulk request on the given endpoint, marking the buffer BUSY
 * and setting *pbusy under fsg->lock. On submission failure the buffer
 * is returned to EMPTY and the error is logged (nothing more can be
 * done except wait for a reset).
 * NOTE(review): lines around the error path (original 717, 721-726,
 * 730, 733-735) are missing from this extract; kept byte-identical.
 */
706 static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
707 struct usb_request *req, int *pbusy,
708 enum fsg_buffer_state *state)
712 DBG(fsg, "start_transfer req: %p, req->buf: %p\n", req, req->buf);
713 if (ep == fsg->bulk_in)
714 dump_msg(fsg, "bulk-in", req->buf, req->length);
716 spin_lock_irq(&fsg->lock);
718 *state = BUF_STATE_BUSY;
719 spin_unlock_irq(&fsg->lock);
720 rc = usb_ep_queue(ep, req, GFP_KERNEL);
723 *state = BUF_STATE_EMPTY;
725 /* We can't do much more than wait for a reset */
727 /* Note: currently the net2280 driver fails zero-length
728 * submissions if DMA is enabled. */
729 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
731 WARN(fsg, "error in submission: %s --> %d\n",
732 (ep == fsg->bulk_in ? "bulk-in" : "bulk-out"),
738 static int sleep_thread(struct fsg_dev *fsg)
742 /* Wait until a signal arrives or we are woken up */
745 set_current_state(TASK_INTERRUPTIBLE);
746 if (signal_pending(current)) {
750 if (fsg->thread_wakeup_needed)
754 __set_current_state(TASK_RUNNING);
755 fsg->thread_wakeup_needed = 0;
760 /*-------------------------------------------------------------------------*/
/*
 * Handle SCSI READ(6)/READ(10)/READ(12): read data from the backing
 * file via vfs_read() and stream it to the host through the bulk-in
 * endpoint using the double-buffered buffhd queue. Sector size is
 * fixed at 512 bytes (the << 9 / & 511 arithmetic throughout).
 * NOTE(review): many lines (braces, some error/exit statements) are
 * missing from this extract; the code below is kept byte-identical.
 */
762 static int do_read(struct fsg_dev *fsg)
764 struct lun *curlun = fsg->curlun;
766 struct fsg_buffhd *bh;
769 loff_t file_offset, file_offset_tmp;
771 unsigned int partial_page;
774 /* Get the starting Logical Block Address and check that it's
776 if (fsg->cmnd[0] == SC_READ_6)
777 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
779 lba = get_be32(&fsg->cmnd[2]);
781 /* We allow DPO (Disable Page Out = don't save data in the
782 * cache) and FUA (Force Unit Access = don't read from the
783 * cache), but we don't implement them. */
784 if ((fsg->cmnd[1] & ~0x18) != 0) {
785 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
789 if (lba >= curlun->num_sectors) {
790 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
793 file_offset = ((loff_t) lba) << 9;
795 /* Carry out the file reads */
796 amount_left = fsg->data_size_from_cmnd;
797 if (unlikely(amount_left == 0))
798 return -EIO; /* No default reply */
802 /* Figure out how much we need to read:
803 * Try to read the remaining amount.
804 * But don't read more than the buffer size.
805 * And don't try to read past the end of the file.
806 * Finally, if we're not at a page boundary, don't read past
808 * If this means reading 0 then we were asked to read past
809 * the end of file. */
810 amount = min((unsigned int) amount_left,
811 (unsigned int)fsg->buf_size);
812 amount = min((loff_t) amount,
813 curlun->file_length - file_offset);
814 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
815 if (partial_page > 0)
816 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
819 /* Wait for the next buffer to become available */
820 bh = fsg->next_buffhd_to_fill;
821 while (bh->state != BUF_STATE_EMPTY) {
822 rc = sleep_thread(fsg);
827 /* If we were asked to read past the end of file,
828 * end with an empty buffer. */
831 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
832 curlun->sense_data_info = file_offset >> 9;
833 curlun->info_valid = 1;
834 bh->inreq->length = 0;
835 bh->state = BUF_STATE_FULL;
839 /* Perform the read */
840 file_offset_tmp = file_offset;
841 nread = vfs_read(curlun->filp,
842 (char __user *) bh->buf,
843 amount, &file_offset_tmp);
844 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
845 (unsigned long long) file_offset,
847 if (signal_pending(current))
851 LDBG(curlun, "error in file read: %d\n",
854 } else if (nread < amount) {
855 LDBG(curlun, "partial file read: %d/%u\n",
856 (int) nread, amount);
857 nread -= (nread & 511); /* Round down to a block */
859 file_offset += nread;
860 amount_left -= nread;
861 fsg->residue -= nread;
862 bh->inreq->length = nread;
863 bh->state = BUF_STATE_FULL;
865 /* If an error occurred, report it and its position */
866 if (nread < amount) {
867 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
868 curlun->sense_data_info = file_offset >> 9;
869 curlun->info_valid = 1;
873 if (amount_left == 0)
874 break; /* No more left to read */
876 /* Send this buffer and go read some more */
877 start_transfer(fsg, fsg->bulk_in, bh->inreq,
878 &bh->inreq_busy, &bh->state);
879 fsg->next_buffhd_to_fill = bh->next;
882 return -EIO; /* No default reply */
886 /*-------------------------------------------------------------------------*/
/*
 * Handle SCSI WRITE(6)/WRITE(10)/WRITE(12): receive data from the host
 * through the bulk-out endpoint and write it to the backing file via
 * vfs_write(). Runs a pipeline: the top half of the loop queues
 * bulk-out requests for more data, the bottom half drains FULL buffers
 * to the file. FUA (cmnd[1] bit 3) is implemented by toggling O_SYNC
 * on the backing file. Sector size is fixed at 512 bytes.
 * NOTE(review): many lines (braces, some error/exit statements) are
 * missing from this extract; the code below is kept byte-identical.
 */
888 static int do_write(struct fsg_dev *fsg)
890 struct lun *curlun = fsg->curlun;
892 struct fsg_buffhd *bh;
894 u32 amount_left_to_req, amount_left_to_write;
895 loff_t usb_offset, file_offset, file_offset_tmp;
897 unsigned int partial_page;
902 curlun->sense_data = SS_WRITE_PROTECTED;
905 curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */
907 /* Get the starting Logical Block Address and check that it's
909 if (fsg->cmnd[0] == SC_WRITE_6)
910 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
912 lba = get_be32(&fsg->cmnd[2]);
914 /* We allow DPO (Disable Page Out = don't save data in the
915 * cache) and FUA (Force Unit Access = write directly to the
916 * medium). We don't implement DPO; we implement FUA by
917 * performing synchronous output. */
918 if ((fsg->cmnd[1] & ~0x18) != 0) {
919 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
922 if (fsg->cmnd[1] & 0x08) /* FUA */
923 curlun->filp->f_flags |= O_SYNC;
925 if (lba >= curlun->num_sectors) {
926 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
930 /* Carry out the file writes */
932 file_offset = usb_offset = ((loff_t) lba) << 9;
933 amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
935 while (amount_left_to_write > 0) {
937 /* Queue a request for more data from the host */
938 bh = fsg->next_buffhd_to_fill;
939 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
941 /* Figure out how much we want to get:
942 * Try to get the remaining amount.
943 * But don't get more than the buffer size.
944 * And don't try to go past the end of the file.
945 * If we're not at a page boundary,
946 * don't go past the next page.
947 * If this means getting 0, then we were asked
948 * to write past the end of file.
949 * Finally, round down to a block boundary. */
950 amount = min(amount_left_to_req, (u32)fsg->buf_size);
951 amount = min((loff_t) amount, curlun->file_length -
953 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
954 if (partial_page > 0)
956 (unsigned int) PAGE_CACHE_SIZE - partial_page);
961 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
962 curlun->sense_data_info = usb_offset >> 9;
963 curlun->info_valid = 1;
966 amount -= (amount & 511);
969 /* Why were we were asked to transfer a
975 /* Get the next buffer */
976 usb_offset += amount;
977 fsg->usb_amount_left -= amount;
978 amount_left_to_req -= amount;
979 if (amount_left_to_req == 0)
982 /* amount is always divisible by 512, hence by
983 * the bulk-out maxpacket size */
984 bh->outreq->length = bh->bulk_out_intended_length =
986 start_transfer(fsg, fsg->bulk_out, bh->outreq,
987 &bh->outreq_busy, &bh->state);
988 fsg->next_buffhd_to_fill = bh->next;
992 /* Write the received data to the backing file */
993 bh = fsg->next_buffhd_to_drain;
994 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
995 break; /* We stopped early */
996 if (bh->state == BUF_STATE_FULL) {
998 fsg->next_buffhd_to_drain = bh->next;
999 bh->state = BUF_STATE_EMPTY;
1001 /* Did something go wrong with the transfer? */
1002 if (bh->outreq->status != 0) {
1003 curlun->sense_data = SS_COMMUNICATION_FAILURE;
1004 curlun->sense_data_info = file_offset >> 9;
1005 curlun->info_valid = 1;
1009 amount = bh->outreq->actual;
1010 if (curlun->file_length - file_offset < amount) {
1012 "write %u @ %llu beyond end %llu\n",
1013 amount, (unsigned long long) file_offset,
1014 (unsigned long long) curlun->file_length);
1015 amount = curlun->file_length - file_offset;
1018 /* Perform the write */
1019 file_offset_tmp = file_offset;
1020 nwritten = vfs_write(curlun->filp,
1021 (char __user *) bh->buf,
1022 amount, &file_offset_tmp);
1023 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
1024 (unsigned long long) file_offset,
1026 if (signal_pending(current))
1027 return -EINTR; /* Interrupted! */
1030 LDBG(curlun, "error in file write: %d\n",
1033 } else if (nwritten < amount) {
1034 LDBG(curlun, "partial file write: %d/%u\n",
1035 (int) nwritten, amount);
1036 nwritten -= (nwritten & 511);
1037 /* Round down to a block */
1039 file_offset += nwritten;
1040 amount_left_to_write -= nwritten;
1041 fsg->residue -= nwritten;
1043 /* If an error occurred, report it and its position */
1044 if (nwritten < amount) {
1045 curlun->sense_data = SS_WRITE_ERROR;
1046 curlun->sense_data_info = file_offset >> 9;
1047 curlun->info_valid = 1;
1051 /* Did the host decide to stop early? */
1052 if (bh->outreq->actual != bh->outreq->length) {
1053 fsg->short_packet_received = 1;
1059 /* Wait for something to happen */
1060 rc = sleep_thread(fsg);
1065 return -EIO; /* No default reply */
1069 /*-------------------------------------------------------------------------*/
1071 /* Sync the file data, don't bother with the metadata.
1072 * The caller must own fsg->filesem.
1073 * This code was copied from fs/buffer.c:sys_fdatasync(). */
1074 static int fsync_sub(struct lun *curlun)
1076 struct file *filp = curlun->filp;
1077 struct inode *inode;
1080 if (curlun->ro || !filp)
1082 if (!filp->f_op->fsync)
1085 inode = filp->f_path.dentry->d_inode;
1086 mutex_lock(&inode->i_mutex);
1087 rc = filemap_fdatawrite(inode->i_mapping);
1088 err = filp->f_op->fsync(filp, 1);
1091 err = filemap_fdatawait(inode->i_mapping);
1094 mutex_unlock(&inode->i_mutex);
1095 VLDBG(curlun, "fdatasync -> %d\n", rc);
1099 static void fsync_all(struct fsg_dev *fsg)
1103 for (i = 0; i < fsg->nluns; ++i)
1104 fsync_sub(&fsg->luns[i]);
1107 static int do_synchronize_cache(struct fsg_dev *fsg)
1109 struct lun *curlun = fsg->curlun;
1112 /* We ignore the requested LBA and write out all file's
1113 * dirty data buffers. */
1114 rc = fsync_sub(curlun);
1116 curlun->sense_data = SS_WRITE_ERROR;
1121 /*-------------------------------------------------------------------------*/
1123 static void invalidate_sub(struct lun *curlun)
1125 struct file *filp = curlun->filp;
1126 struct inode *inode = filp->f_path.dentry->d_inode;
1129 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1130 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
/*
 * Handle SCSI VERIFY: flush and invalidate the page cache, then read
 * the requested blocks from the backing file to check they can be
 * read without error. No data is sent to the host.
 * NOTE(review): lines (braces, some statements in the flush/exit
 * paths) are missing from this extract; kept byte-identical.
 */
1133 static int do_verify(struct fsg_dev *fsg)
1135 struct lun *curlun = fsg->curlun;
1137 u32 verification_length;
1138 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1139 loff_t file_offset, file_offset_tmp;
1141 unsigned int amount;
1144 /* Get the starting Logical Block Address and check that it's
1146 lba = get_be32(&fsg->cmnd[2]);
1147 if (lba >= curlun->num_sectors) {
1148 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1152 /* We allow DPO (Disable Page Out = don't save data in the
1153 * cache) but we don't implement it. */
1154 if ((fsg->cmnd[1] & ~0x10) != 0) {
1155 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1159 verification_length = get_be16(&fsg->cmnd[7]);
1160 if (unlikely(verification_length == 0))
1161 return -EIO; /* No default reply */
1163 /* Prepare to carry out the file verify */
1164 amount_left = verification_length << 9;
1165 file_offset = ((loff_t) lba) << 9;
1167 /* Write out all the dirty buffers before invalidating them */
1169 if (signal_pending(current))
1172 invalidate_sub(curlun);
1173 if (signal_pending(current))
1176 /* Just try to read the requested blocks */
1177 while (amount_left > 0) {
1179 /* Figure out how much we need to read:
1180 * Try to read the remaining amount, but not more than
1182 * And don't try to read past the end of the file.
1183 * If this means reading 0 then we were asked to read
1184 * past the end of file. */
1185 amount = min((unsigned int) amount_left,
1186 (unsigned int)fsg->buf_size);
1187 amount = min((loff_t) amount,
1188 curlun->file_length - file_offset);
1190 curlun->sense_data =
1191 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1192 curlun->sense_data_info = file_offset >> 9;
1193 curlun->info_valid = 1;
1197 /* Perform the read */
1198 file_offset_tmp = file_offset;
1199 nread = vfs_read(curlun->filp,
1200 (char __user *) bh->buf,
1201 amount, &file_offset_tmp);
1202 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1203 (unsigned long long) file_offset,
1205 if (signal_pending(current))
1209 LDBG(curlun, "error in file verify: %d\n",
1212 } else if (nread < amount) {
1213 LDBG(curlun, "partial file verify: %d/%u\n",
1214 (int) nread, amount);
1215 nread -= (nread & 511); /* Round down to a sector */
1218 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1219 curlun->sense_data_info = file_offset >> 9;
1220 curlun->info_valid = 1;
1223 file_offset += nread;
1224 amount_left -= nread;
1230 /*-------------------------------------------------------------------------*/
1232 static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1234 u8 *buf = (u8 *) bh->buf;
1236 if (!fsg->curlun) { /* Unsupported LUNs are okay */
1237 fsg->bad_lun_okay = 1;
1239 buf[0] = 0x7f; /* Unsupported, no device-type */
1243 memset(buf, 0, 8); /* Non-removable, direct-access device */
1245 buf[1] = 0x80; /* set removable bit */
1246 buf[2] = 2; /* ANSI SCSI level 2 */
1247 buf[3] = 2; /* SCSI-2 INQUIRY data format */
1248 buf[4] = 31; /* Additional length */
1249 /* No special options */
1250 sprintf(buf + 8, "%-8s%-16s%04x", fsg->vendor,
1251 fsg->product, fsg->release);
/*
 * Handle SCSI REQUEST SENSE: build an 18-byte fixed-format sense
 * response in bh->buf from the current LUN's saved sense data, then
 * clear that sense data (option a/b handling is described below).
 * NOTE(review): lines (braces, #ifdef guard for the option-b code,
 * the memset and SK/ASC/ASCQ stores around lines 1297-1307 of the
 * original) are missing from this extract; kept byte-identical.
 */
1256 static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1258 struct lun *curlun = fsg->curlun;
1259 u8 *buf = (u8 *) bh->buf;
1264 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1266 * If a REQUEST SENSE command is received from an initiator
1267 * with a pending unit attention condition (before the target
1268 * generates the contingent allegiance condition), then the
1269 * target shall either:
1270 * a) report any pending sense data and preserve the unit
1271 * attention condition on the logical unit, or,
1272 * b) report the unit attention condition, may discard any
1273 * pending sense data, and clear the unit attention
1274 * condition on the logical unit for that initiator.
1276 * FSG normally uses option a); enable this code to use option b).
1279 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1280 curlun->sense_data = curlun->unit_attention_data;
1281 curlun->unit_attention_data = SS_NO_SENSE;
1285 if (!curlun) { /* Unsupported LUNs are okay */
1286 fsg->bad_lun_okay = 1;
1287 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1291 sd = curlun->sense_data;
1292 sdinfo = curlun->sense_data_info;
/* info_valid << 7 sets the VALID bit of byte 0 */
1293 valid = curlun->info_valid << 7;
1294 curlun->sense_data = SS_NO_SENSE;
1295 curlun->sense_data_info = 0;
1296 curlun->info_valid = 0;
1300 buf[0] = valid | 0x70; /* Valid, current error */
1302 put_be32(&buf[3], sdinfo); /* Sense information */
1303 buf[7] = 18 - 8; /* Additional sense length */
/*
 * SCSI READ CAPACITY(10): return the last logical block address and
 * the block length (fixed at 512 bytes) in bh->buf. Rejects a
 * malformed PMI/LBA combination with SS_INVALID_FIELD_IN_CDB.
 */
1310 static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1312 struct lun *curlun = fsg->curlun;
1313 u32 lba = get_be32(&fsg->cmnd[2]);
1314 int pmi = fsg->cmnd[8];
1315 u8 *buf = (u8 *) bh->buf;
1317 /* Check the PMI and LBA fields */
1318 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1319 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1323 put_be32(&buf[0], curlun->num_sectors - 1); /* Max logical block */
1324 put_be32(&buf[4], 512); /* Block length */
/*
 * SCSI MODE SENSE(6)/(10): build a mode parameter header plus the one
 * supported mode page (Caching, page 0x08) in bh->buf. The WriteProtect
 * bit reflects curlun->ro; "changeable values" (PC == 1) reports all
 * fields as zero. Saved values (PC == 3) and unknown pages are rejected.
 */
1329 static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1331 struct lun *curlun = fsg->curlun;
1332 int mscmnd = fsg->cmnd[0];
1333 u8 *buf = (u8 *) bh->buf;
1336 int changeable_values, all_pages;
1340 if ((fsg->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */
1341 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
/* PC = page control (current/changeable/default/saved) */
1344 pc = fsg->cmnd[2] >> 6;
1345 page_code = fsg->cmnd[2] & 0x3f;
1347 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1350 changeable_values = (pc == 1);
1351 all_pages = (page_code == 0x3f);
1353 /* Write the mode parameter header. Fixed values are: default
1354 * medium type, no cache control (DPOFUA), and no block descriptors.
1355 * The only variable value is the WriteProtect bit. We will fill in
1356 * the mode data length later. */
1358 if (mscmnd == SC_MODE_SENSE_6) {
1359 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1362 } else { /* SC_MODE_SENSE_10 */
1363 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1368 /* No block descriptors */
1370 /* Disabled to workaround USB reset problems with a Vista host.
1373 /* The mode pages, in numerical order. The only page we support
1374 * is the Caching page. */
1375 if (page_code == 0x08 || all_pages) {
1377 buf[0] = 0x08; /* Page code */
1378 buf[1] = 10; /* Page length */
1379 memset(buf+2, 0, 10); /* None of the fields are changeable */
1381 if (!changeable_values) {
1382 buf[2] = 0x04; /* Write cache enable, */
1383 /* Read cache not disabled */
1384 /* No cache retention priorities */
1385 put_be16(&buf[4], 0xffff); /* Don't disable prefetch */
1386 /* Minimum prefetch = 0 */
1387 put_be16(&buf[8], 0xffff); /* Maximum prefetch */
1388 /* Maximum prefetch ceiling */
1389 put_be16(&buf[10], 0xffff);
1397 /* Check that a valid page was requested and the mode data length
1398 * isn't too long. */
1400 if (!valid_page || len > limit) {
1401 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1405 /* Store the mode data length */
1406 if (mscmnd == SC_MODE_SENSE_6)
1409 put_be16(buf0, len - 2);
/*
 * SCSI START STOP UNIT: this implementation acts on an eject request
 * (LoEj set, Start clear per the visible flag extraction) by closing
 * the backing file and raising a MEDIUM NOT PRESENT unit attention.
 */
1413 static int do_start_stop(struct fsg_dev *fsg)
1415 struct lun *curlun = fsg->curlun;
1418 /* int immed = fsg->cmnd[1] & 0x01; */
1419 loej = fsg->cmnd[4] & 0x02;
1420 start = fsg->cmnd[4] & 0x01;
1423 /* eject request from the host */
1424 if (backing_file_is_open(curlun)) {
1425 close_backing_file(fsg, curlun);
1426 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
/*
 * SCSI PREVENT ALLOW MEDIUM REMOVAL: record the host's prevent flag in
 * curlun->prevent_medium_removal. Any bit other than Prevent set in
 * byte 4 is rejected with SS_INVALID_FIELD_IN_CDB.
 */
1433 static int do_prevent_allow(struct fsg_dev *fsg)
1435 struct lun *curlun = fsg->curlun;
1438 prevent = fsg->cmnd[4] & 0x01;
1439 if ((fsg->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
1440 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
/* Transition from "prevented" to "allowed" */
1444 if (curlun->prevent_medium_removal && !prevent)
1446 curlun->prevent_medium_removal = prevent;
/*
 * SCSI READ FORMAT CAPACITIES: report a single Current/Maximum Capacity
 * descriptor (number of blocks, descriptor code 0x02 = formatted media,
 * 512-byte block length) in bh->buf.
 *
 * NOTE(review): buf[4] = 0x02 immediately after put_be32(&buf[4], 512)
 * only makes sense if buf was advanced past the 4-byte list header
 * between these statements (the upstream driver does `buf += 4;` there);
 * confirm that advance is present in the full source.
 */
1451 static int do_read_format_capacities(struct fsg_dev *fsg,
1452 struct fsg_buffhd *bh)
1454 struct lun *curlun = fsg->curlun;
1455 u8 *buf = (u8 *) bh->buf;
1457 buf[0] = buf[1] = buf[2] = 0;
1458 buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
1461 put_be32(&buf[0], curlun->num_sectors); /* Number of blocks */
1462 put_be32(&buf[4], 512); /* Block length */
1463 buf[4] = 0x02; /* Current capacity */
/*
 * SCSI MODE SELECT: not supported — always fails the command with
 * SS_INVALID_COMMAND sense data.
 */
1468 static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1470 struct lun *curlun = fsg->curlun;
1472 /* We don't support MODE SELECT */
1473 curlun->sense_data = SS_INVALID_COMMAND;
1478 /*-------------------------------------------------------------------------*/
/*
 * Queue a zero-length bulk-in transfer on the next free buffer head
 * (used to terminate an IN transfer with a zero-length packet).
 * Sleeps until a buffer head becomes empty.
 */
1480 static int write_zero(struct fsg_dev *fsg)
1482 struct fsg_buffhd *bh;
1485 DBG(fsg, "write_zero\n");
1486 /* Wait for the next buffer to become available */
1487 bh = fsg->next_buffhd_to_fill;
1488 while (bh->state != BUF_STATE_EMPTY) {
1489 rc = sleep_thread(fsg);
1494 bh->inreq->length = 0;
1495 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1496 &bh->inreq_busy, &bh->state);
1498 fsg->next_buffhd_to_fill = bh->next;
/*
 * Read and discard the remaining bulk-out data for the current command
 * (used when the host sends more data than the command needs). Keeps
 * draining filled buffers and queueing new bulk-out requests until
 * usb_amount_left reaches zero; a short packet or request error raises
 * an ABORT_BULK_OUT exception.
 */
1503 static int throw_away_data(struct fsg_dev *fsg)
1505 struct fsg_buffhd *bh;
1509 DBG(fsg, "throw_away_data\n");
1510 while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
1511 fsg->usb_amount_left > 0) {
1513 /* Throw away the data in a filled buffer */
1514 if (bh->state == BUF_STATE_FULL) {
1516 bh->state = BUF_STATE_EMPTY;
1517 fsg->next_buffhd_to_drain = bh->next;
1519 /* A short packet or an error ends everything */
1520 if (bh->outreq->actual != bh->outreq->length ||
1521 bh->outreq->status != 0) {
1522 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1528 /* Try to submit another request if we need one */
1529 bh = fsg->next_buffhd_to_fill;
1530 if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
1531 amount = min(fsg->usb_amount_left, (u32) fsg->buf_size);
1533 /* amount is always divisible by 512, hence by
1534 * the bulk-out maxpacket size */
1535 bh->outreq->length = bh->bulk_out_intended_length =
1537 start_transfer(fsg, fsg->bulk_out, bh->outreq,
1538 &bh->outreq_busy, &bh->state);
1539 fsg->next_buffhd_to_fill = bh->next;
1540 fsg->usb_amount_left -= amount;
1544 /* Otherwise wait for something to happen */
1545 rc = sleep_thread(fsg);
/*
 * Finish the data phase of the current command according to
 * fsg->data_dir: send the final buffer for TO_HOST transfers, or for
 * FROM_HOST transfers either accept the residue, abort on an early
 * short packet, halt/abort when we can't consume the rest, or read in
 * and discard the excess via throw_away_data().
 */
1553 static int finish_reply(struct fsg_dev *fsg)
1555 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1558 switch (fsg->data_dir) {
1560 break; /* Nothing to send */
1562 case DATA_DIR_UNKNOWN:
1566 /* All but the last buffer of data must have already been sent */
1567 case DATA_DIR_TO_HOST:
1568 if (fsg->data_size == 0)
1569 ; /* Nothing to send */
1571 /* If there's no residue, simply send the last buffer */
1572 else if (fsg->residue == 0) {
1573 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1574 &bh->inreq_busy, &bh->state);
1575 fsg->next_buffhd_to_fill = bh->next;
1577 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1578 &bh->inreq_busy, &bh->state);
1579 fsg->next_buffhd_to_fill = bh->next;
1581 /* this is unnecessary, and was causing problems with MacOS */
1582 if (bh->inreq->length > 0)
1588 /* We have processed all we want from the data the host has sent.
1589 * There may still be outstanding bulk-out requests. */
1590 case DATA_DIR_FROM_HOST:
1591 if (fsg->residue == 0)
1592 ; /* Nothing to receive */
1594 /* Did the host stop sending unexpectedly early? */
1595 else if (fsg->short_packet_received) {
1596 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1600 /* We haven't processed all the incoming data. Even though
1601 * we may be allowed to stall, doing so would cause a race.
1602 * The controller may already have ACK'ed all the remaining
1603 * bulk-out packets, in which case the host wouldn't see a
1604 * STALL. Not realizing the endpoint was halted, it wouldn't
1605 * clear the halt -- leading to problems later on. */
1607 fsg_set_halt(fsg, fsg->bulk_out);
1608 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1612 /* We can't stall. Read in the excess data and throw it
1615 rc = throw_away_data(fsg);
/*
 * Build and queue the Bulk-only Command Status Wrapper (CSW) for the
 * current command: pass / fail / phase-error, the command tag and the
 * residue. Waits for a free buffer head, then starts the bulk-in
 * transfer of the USB_BULK_CS_WRAP_LEN-byte CSW.
 */
1622 static int send_status(struct fsg_dev *fsg)
1624 struct lun *curlun = fsg->curlun;
1625 struct fsg_buffhd *bh;
1627 u8 status = USB_STATUS_PASS;
1629 struct bulk_cs_wrap *csw;
1631 DBG(fsg, "send_status\n");
1632 /* Wait for the next buffer to become available */
1633 bh = fsg->next_buffhd_to_fill;
1634 while (bh->state != BUF_STATE_EMPTY) {
1635 rc = sleep_thread(fsg);
/* Pick the sense data to log/report for this command */
1641 sd = curlun->sense_data;
1642 sdinfo = curlun->sense_data_info;
1643 } else if (fsg->bad_lun_okay)
1646 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1648 if (fsg->phase_error) {
1649 DBG(fsg, "sending phase-error status\n");
1650 status = USB_STATUS_PHASE_ERROR;
1651 sd = SS_INVALID_COMMAND;
1652 } else if (sd != SS_NO_SENSE) {
1653 DBG(fsg, "sending command-failure status\n");
1654 status = USB_STATUS_FAIL;
1655 VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1657 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1662 /* Store and send the Bulk-only CSW */
1663 csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
1664 csw->Tag = fsg->tag;
1665 csw->Residue = cpu_to_le32(fsg->residue);
1666 csw->Status = status;
1668 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1669 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1670 &bh->inreq_busy, &bh->state);
1672 fsg->next_buffhd_to_fill = bh->next;
1677 /*-------------------------------------------------------------------------*/
1679 /* Check whether the command is properly formed and whether its data size
1680 * and direction agree with the values we already have. */
/*
 * Returns 0 when the command may proceed; otherwise sets sense data /
 * phase_error and returns an error so the caller fails the command.
 * Also resolves fsg->curlun from the CBW LUN, clears stale sense data,
 * enforces unit-attention semantics, validates the CDB mask, and checks
 * that a medium is present when needs_medium is set.
 */
1681 static int check_command(struct fsg_dev *fsg, int cmnd_size,
1682 enum data_direction data_dir, unsigned int mask,
1683 int needs_medium, const char *name)
1686 int lun = fsg->cmnd[1] >> 5;
1687 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1692 if (fsg->data_dir != DATA_DIR_UNKNOWN)
1693 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
1695 VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1696 name, cmnd_size, dirletter[(int) data_dir],
1697 fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
1699 /* We can't reply at all until we know the correct data direction
1701 if (fsg->data_size_from_cmnd == 0)
1702 data_dir = DATA_DIR_NONE;
1703 if (fsg->data_dir == DATA_DIR_UNKNOWN) { /* CB or CBI */
1704 fsg->data_dir = data_dir;
1705 fsg->data_size = fsg->data_size_from_cmnd;
1707 } else { /* Bulk-only */
1708 if (fsg->data_size < fsg->data_size_from_cmnd) {
1710 /* Host data size < Device data size is a phase error.
1711 * Carry out the command, but only transfer as much
1712 * as we are allowed. */
1713 DBG(fsg, "phase error 1\n");
1714 fsg->data_size_from_cmnd = fsg->data_size;
1715 fsg->phase_error = 1;
1718 fsg->residue = fsg->usb_amount_left = fsg->data_size;
1720 /* Conflicting data directions is a phase error */
1721 if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
1722 fsg->phase_error = 1;
1723 DBG(fsg, "phase error 2\n");
1727 /* Verify the length of the command itself */
1728 if (cmnd_size != fsg->cmnd_size) {
1730 /* Special case workaround: MS-Windows issues REQUEST SENSE
1731 * with cbw->Length == 12 (it should be 6). */
1732 if (fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
1733 cmnd_size = fsg->cmnd_size;
1735 fsg->phase_error = 1;
1740 /* Check that the LUN values are consistent */
1741 if (fsg->lun != lun)
1742 DBG(fsg, "using LUN %d from CBW, "
1743 "not LUN %d from CDB\n",
/* Resolve the LUN; clear sense data unless REQUEST SENSE will read it */
1747 if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
1748 fsg->curlun = curlun = &fsg->luns[fsg->lun];
1749 if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
1750 curlun->sense_data = SS_NO_SENSE;
1751 curlun->sense_data_info = 0;
1752 curlun->info_valid = 0;
1755 fsg->curlun = curlun = NULL;
1756 fsg->bad_lun_okay = 0;
1758 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1759 * to use unsupported LUNs; all others may not. */
1760 if (fsg->cmnd[0] != SC_INQUIRY &&
1761 fsg->cmnd[0] != SC_REQUEST_SENSE) {
1762 DBG(fsg, "unsupported LUN %d\n", fsg->lun);
1767 /* If a unit attention condition exists, only INQUIRY and
1768 * REQUEST SENSE commands are allowed; anything else must fail. */
1769 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1770 fsg->cmnd[0] != SC_INQUIRY &&
1771 fsg->cmnd[0] != SC_REQUEST_SENSE) {
1772 curlun->sense_data = curlun->unit_attention_data;
1773 curlun->unit_attention_data = SS_NO_SENSE;
1777 /* Check that only command bytes listed in the mask are non-zero */
1778 fsg->cmnd[1] &= 0x1f; /* Mask away the LUN */
1779 for (i = 1; i < cmnd_size; ++i) {
1780 if (fsg->cmnd[i] && !(mask & (1 << i))) {
1782 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1783 DBG(fsg, "SS_INVALID_FIELD_IN_CDB\n");
1788 /* If the medium isn't mounted and the command needs to access
1789 * it, return an error. */
1790 if (curlun && !backing_file_is_open(curlun) && needs_medium) {
1791 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1792 DBG(fsg, "SS_MEDIUM_NOT_PRESENT\n");
/*
 * Central SCSI command dispatcher: waits for a free buffer head,
 * computes data_size_from_cmnd for the opcode, validates the command
 * with check_command() (CDB length, direction, non-zero-byte mask,
 * medium requirement), and invokes the matching do_*() handler.
 * Unknown opcodes fail with SS_INVALID_COMMAND. Runs with filesem held
 * for read because the handlers use the backing file. On return, sets
 * up the single reply buffer for finish_reply() on TO_HOST commands.
 */
1800 static int do_scsi_command(struct fsg_dev *fsg)
1802 struct fsg_buffhd *bh;
1804 int reply = -EINVAL;
1806 static char unknown[16];
1810 /* Wait for the next buffer to become available for data or status */
1811 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
1812 while (bh->state != BUF_STATE_EMPTY) {
1813 rc = sleep_thread(fsg);
1817 fsg->phase_error = 0;
1818 fsg->short_packet_received = 0;
1820 down_read(&fsg->filesem); /* We're using the backing file */
1821 switch (fsg->cmnd[0]) {
1824 fsg->data_size_from_cmnd = fsg->cmnd[4];
1825 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1828 reply = do_inquiry(fsg, bh);
1831 case SC_MODE_SELECT_6:
1832 fsg->data_size_from_cmnd = fsg->cmnd[4];
1833 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
1835 "MODE SELECT(6)")) == 0)
1836 reply = do_mode_select(fsg, bh);
1839 case SC_MODE_SELECT_10:
1840 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
1841 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
1843 "MODE SELECT(10)")) == 0)
1844 reply = do_mode_select(fsg, bh);
1847 case SC_MODE_SENSE_6:
1848 fsg->data_size_from_cmnd = fsg->cmnd[4];
1849 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1850 (1<<1) | (1<<2) | (1<<4), 0,
1851 "MODE SENSE(6)")) == 0)
1852 reply = do_mode_sense(fsg, bh);
1855 case SC_MODE_SENSE_10:
1856 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
1857 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1858 (1<<1) | (1<<2) | (3<<7), 0,
1859 "MODE SENSE(10)")) == 0)
1860 reply = do_mode_sense(fsg, bh);
1863 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
1864 fsg->data_size_from_cmnd = 0;
1865 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
1867 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
1868 reply = do_prevent_allow(fsg);
/* READ(6): a transfer length of 0 means 256 blocks */
1873 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1874 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1877 reply = do_read(fsg);
/* READ(10): 16-bit transfer length, in 512-byte blocks */
1881 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
1882 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1883 (1<<1) | (0xf<<2) | (3<<7), 1,
1885 reply = do_read(fsg);
/* READ(12): 32-bit transfer length, in 512-byte blocks */
1889 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
1890 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
1891 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1893 reply = do_read(fsg);
1896 case SC_READ_CAPACITY:
1897 fsg->data_size_from_cmnd = 8;
1898 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1899 (0xf<<2) | (1<<8), 1,
1900 "READ CAPACITY")) == 0)
1901 reply = do_read_capacity(fsg, bh);
1904 case SC_READ_FORMAT_CAPACITIES:
1905 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
1906 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1908 "READ FORMAT CAPACITIES")) == 0)
1909 reply = do_read_format_capacities(fsg, bh);
1912 case SC_REQUEST_SENSE:
1913 fsg->data_size_from_cmnd = fsg->cmnd[4];
1914 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1916 "REQUEST SENSE")) == 0)
1917 reply = do_request_sense(fsg, bh);
1920 case SC_START_STOP_UNIT:
1921 fsg->data_size_from_cmnd = 0;
1922 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
1924 "START-STOP UNIT")) == 0)
1925 reply = do_start_stop(fsg);
1928 case SC_SYNCHRONIZE_CACHE:
1929 fsg->data_size_from_cmnd = 0;
1930 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
1931 (0xf<<2) | (3<<7), 1,
1932 "SYNCHRONIZE CACHE")) == 0)
1933 reply = do_synchronize_cache(fsg);
1936 case SC_TEST_UNIT_READY:
1937 fsg->data_size_from_cmnd = 0;
1938 reply = check_command(fsg, 6, DATA_DIR_NONE,
1943 /* Although optional, this command is used by MS-Windows. We
1944 * support a minimal version: BytChk must be 0. */
1946 fsg->data_size_from_cmnd = 0;
1947 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
1948 (1<<1) | (0xf<<2) | (3<<7), 1,
1950 reply = do_verify(fsg);
/* WRITE(6): a transfer length of 0 means 256 blocks */
1955 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1956 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
1959 reply = do_write(fsg);
/* WRITE(10): 16-bit transfer length, in 512-byte blocks */
1963 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
1964 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
1965 (1<<1) | (0xf<<2) | (3<<7), 1,
1967 reply = do_write(fsg);
/* WRITE(12): 32-bit transfer length, in 512-byte blocks */
1971 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
1972 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
1973 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1975 reply = do_write(fsg);
1978 /* Some mandatory commands that we recognize but don't implement.
1979 * They don't mean much in this setting. It's left as an exercise
1980 * for anyone interested to implement RESERVE and RELEASE in terms
1981 * of Posix locks. */
1982 case SC_FORMAT_UNIT:
1985 case SC_SEND_DIAGNOSTIC:
/* Unknown opcode: accept any CDB bytes, then fail the command */
1989 fsg->data_size_from_cmnd = 0;
1990 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
1991 if ((reply = check_command(fsg, fsg->cmnd_size,
1992 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
1993 fsg->curlun->sense_data = SS_INVALID_COMMAND;
1998 up_read(&fsg->filesem);
2000 VDBG(fsg, "reply: %d, fsg->data_size_from_cmnd: %d\n",
2001 reply, fsg->data_size_from_cmnd);
2002 if (reply == -EINTR || signal_pending(current))
2005 /* Set up the single reply buffer for finish_reply() */
2006 if (reply == -EINVAL)
2007 reply = 0; /* Error reply length */
2008 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
2009 reply = min((u32) reply, fsg->data_size_from_cmnd);
2010 bh->inreq->length = reply;
2011 bh->state = BUF_STATE_FULL;
2012 fsg->residue -= reply;
2013 } /* Otherwise it's already set */
2019 /*-------------------------------------------------------------------------*/
/*
 * Validate a received Bulk-only Command Block Wrapper (CBW) and copy
 * its fields into the fsg state: command bytes, data direction/size,
 * LUN and tag. Rejects wrong-length or wrong-signature packets and
 * meaningless LUN/flag/length values.
 */
2021 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2023 struct usb_request *req = bh->outreq;
2024 struct bulk_cb_wrap *cbw = req->buf;
2026 /* Was this a real packet? */
2030 /* Is the CBW valid? */
2031 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2032 cbw->Signature != __constant_cpu_to_le32(
2034 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2036 le32_to_cpu(cbw->Signature));
2040 /* Is the CBW meaningful? */
2041 if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2042 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2043 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2045 cbw->Lun, cbw->Flags, cbw->Length);
2049 /* Save the command for later */
2050 fsg->cmnd_size = cbw->Length;
2051 memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
2052 if (cbw->Flags & USB_BULK_IN_FLAG)
2053 fsg->data_dir = DATA_DIR_TO_HOST;
2055 fsg->data_dir = DATA_DIR_FROM_HOST;
2056 fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2057 if (fsg->data_size == 0)
2058 fsg->data_dir = DATA_DIR_NONE;
2059 fsg->lun = cbw->Lun;
2060 fsg->tag = cbw->Tag;
/*
 * Queue a bulk-out request for the next CBW, wait for it to arrive,
 * and parse it via received_cbw(). If the thread is woken by an
 * exception while waiting, the outstanding request is dequeued and
 * the buffer reset to empty before returning.
 */
2065 static int get_next_command(struct fsg_dev *fsg)
2067 struct fsg_buffhd *bh;
2070 /* Wait for the next buffer to become available */
2071 bh = fsg->next_buffhd_to_fill;
2072 while (bh->state != BUF_STATE_EMPTY) {
2073 rc = sleep_thread(fsg);
2075 usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2076 bh->outreq_busy = 0;
2077 bh->state = BUF_STATE_EMPTY;
2082 /* Queue a request to read a Bulk-only CBW */
2083 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
2084 start_transfer(fsg, fsg->bulk_out, bh->outreq,
2085 &bh->outreq_busy, &bh->state);
2087 /* We will drain the buffer in software, which means we
2088 * can reuse it for the next filling. No need to advance
2089 * next_buffhd_to_fill. */
2091 /* Wait for the CBW to arrive */
2092 while (bh->state != BUF_STATE_FULL) {
2093 rc = sleep_thread(fsg);
2095 usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2096 bh->outreq_busy = 0;
2097 bh->state = BUF_STATE_EMPTY;
2102 rc = received_cbw(fsg, bh);
2103 bh->state = BUF_STATE_EMPTY;
2109 /*-------------------------------------------------------------------------*/
/*
 * Enable one USB endpoint with the given descriptor and attach fsg as
 * its driver_data; logs an error if usb_ep_enable() fails.
 */
2111 static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
2112 const struct usb_endpoint_descriptor *d)
2116 DBG(fsg, "usb_ep_enable %s\n", ep->name);
2117 ep->driver_data = fsg;
2118 rc = usb_ep_enable(ep, d);
2120 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
/*
 * Allocate a usb_request for the given endpoint into *preq
 * (GFP_ATOMIC); logs an error on allocation failure.
 */
2124 static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
2125 struct usb_request **preq)
2127 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2130 ERROR(fsg, "can't allocate request for %s\n", ep->name);
2135 * Reset interface setting and re-init endpoint state (toggle etc).
2136 * Call with altsetting < 0 to disable the interface. The only other
2137 * available altsetting is 0, which enables the interface.
/*
 * Teardown half: disable both bulk endpoints and free the per-buffer
 * usb_requests. Setup half (altsetting >= 0): pick full/high-speed
 * descriptors, enable endpoints, allocate and wire up the requests,
 * and raise a RESET OCCURRED unit attention on every LUN.
 */
2139 static int do_set_interface(struct fsg_dev *fsg, int altsetting)
2141 struct usb_composite_dev *cdev = fsg->cdev;
2144 const struct usb_endpoint_descriptor *d;
2147 DBG(fsg, "reset interface\n");
2149 /* Disable the endpoints */
2150 if (fsg->bulk_in_enabled) {
2151 DBG(fsg, "usb_ep_disable %s\n", fsg->bulk_in->name);
2152 usb_ep_disable(fsg->bulk_in);
2153 fsg->bulk_in_enabled = 0;
2155 if (fsg->bulk_out_enabled) {
2156 DBG(fsg, "usb_ep_disable %s\n", fsg->bulk_out->name);
2157 usb_ep_disable(fsg->bulk_out);
2158 fsg->bulk_out_enabled = 0;
2161 /* Deallocate the requests */
2162 for (i = 0; i < NUM_BUFFERS; ++i) {
2163 struct fsg_buffhd *bh = &fsg->buffhds[i];
2165 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2169 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2176 if (altsetting < 0 || rc != 0)
2179 DBG(fsg, "set interface %d\n", altsetting);
2181 /* Enable the endpoints */
2182 d = ep_desc(cdev->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
2183 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
2185 fsg->bulk_in_enabled = 1;
2187 d = ep_desc(cdev->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
2188 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
2190 fsg->bulk_out_enabled = 1;
2191 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2193 /* Allocate the requests */
2194 for (i = 0; i < NUM_BUFFERS; ++i) {
2195 struct fsg_buffhd *bh = &fsg->buffhds[i];
2197 rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq);
2200 rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq);
2203 bh->inreq->buf = bh->outreq->buf = bh->buf;
2204 bh->inreq->context = bh->outreq->context = bh;
2205 bh->inreq->complete = bulk_in_complete;
2206 bh->outreq->complete = bulk_out_complete;
/* Tell every LUN a reset happened so the host re-checks state */
2210 for (i = 0; i < fsg->nluns; ++i)
2211 fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
/*
 * Hold the wake lock while any LUN has an open backing file (so the
 * device stays awake during mass-storage use), release it otherwise.
 * Runs under fsg->lock.
 */
2216 static void adjust_wake_lock(struct fsg_dev *fsg)
2221 spin_lock_irq(&fsg->lock);
2224 for (i = 0; i < fsg->nluns; ++i) {
2225 if (backing_file_is_open(&fsg->luns[i]))
2231 wake_lock(&fsg->wake_lock);
2233 wake_unlock(&fsg->wake_lock);
2235 spin_unlock_irq(&fsg->lock);
2239 * Change our operational configuration. This code must agree with the code
2240 * that returns config descriptors, and with interface altsetting code.
2242 * It's also responsible for power management interactions. Some
2243 * configurations might not work with our current power sources.
2244 * For now we just assume the gadget is always self-powered.
/*
 * No-op when the configuration is unchanged; otherwise tears down the
 * old interface (if any), records the new config, pushes the state to
 * the switch device, and re-evaluates the wake lock.
 */
2246 static int do_set_config(struct fsg_dev *fsg, u8 new_config)
2250 if (new_config == fsg->config)
2253 /* Disable the single interface */
2254 if (fsg->config != 0) {
2255 DBG(fsg, "reset config\n");
2259 /* Enable the interface */
2260 if (new_config != 0)
2261 fsg->config = new_config;
2263 switch_set_state(&fsg->sdev, new_config);
2264 adjust_wake_lock(fsg);
2269 /*-------------------------------------------------------------------------*/
/*
 * Main-thread exception handler: drains pending signals (anything but
 * SIGUSR1 becomes a high-priority EXIT), flushes the endpoint FIFOs
 * (skipped for a config-change to configuration 1 to avoid data loss),
 * resets the buffer heads and per-LUN SCSI state under fsg->lock, and
 * then performs the action specific to the old exception state
 * (abort-bulk-out, reset, config change, or exit/terminate).
 */
2271 static void handle_exception(struct fsg_dev *fsg)
2276 struct fsg_buffhd *bh;
2277 enum fsg_state old_state;
2282 DBG(fsg, "handle_exception state: %d\n", (int)fsg->state);
2283 /* Clear the existing signals. Anything but SIGUSR1 is converted
2284 * into a high-priority EXIT exception. */
2286 sig = dequeue_signal_lock(current, &current->blocked, &info);
2289 if (sig != SIGUSR1) {
2290 if (fsg->state < FSG_STATE_EXIT)
2291 DBG(fsg, "Main thread exiting on signal\n");
2292 raise_exception(fsg, FSG_STATE_EXIT);
2297 * Do NOT flush the fifo after set_interface()
2298 * Otherwise, it results in some data being lost
2300 if ((fsg->state != FSG_STATE_CONFIG_CHANGE) ||
2301 (fsg->new_config != 1)) {
2302 /* Clear out the controller's fifos */
2303 if (fsg->bulk_in_enabled)
2304 usb_ep_fifo_flush(fsg->bulk_in);
2305 if (fsg->bulk_out_enabled)
2306 usb_ep_fifo_flush(fsg->bulk_out);
2308 /* Reset the I/O buffer states and pointers, the SCSI
2309 * state, and the exception. Then invoke the handler. */
2310 spin_lock_irq(&fsg->lock);
2312 for (i = 0; i < NUM_BUFFERS; ++i) {
2313 bh = &fsg->buffhds[i];
2314 bh->state = BUF_STATE_EMPTY;
2316 fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
2319 new_config = fsg->new_config;
2320 old_state = fsg->state;
2322 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2323 fsg->state = FSG_STATE_STATUS_PHASE;
/* Clear per-LUN SCSI state accumulated before the exception */
2325 for (i = 0; i < fsg->nluns; ++i) {
2326 curlun = &fsg->luns[i];
2327 curlun->prevent_medium_removal = 0;
2328 curlun->sense_data = curlun->unit_attention_data =
2330 curlun->sense_data_info = 0;
2331 curlun->info_valid = 0;
2333 fsg->state = FSG_STATE_IDLE;
2335 spin_unlock_irq(&fsg->lock);
2337 /* Carry out any extra actions required for the exception */
2338 switch (old_state) {
2342 case FSG_STATE_ABORT_BULK_OUT:
2343 DBG(fsg, "FSG_STATE_ABORT_BULK_OUT\n");
2344 spin_lock_irq(&fsg->lock);
2345 if (fsg->state == FSG_STATE_STATUS_PHASE)
2346 fsg->state = FSG_STATE_IDLE;
2347 spin_unlock_irq(&fsg->lock);
2350 case FSG_STATE_RESET:
2351 /* really not much to do here */
2354 case FSG_STATE_CONFIG_CHANGE:
2355 rc = do_set_config(fsg, new_config);
2356 if (new_config == 0) {
2357 /* We're using the backing file */
2358 down_read(&fsg->filesem);
2360 up_read(&fsg->filesem);
2364 case FSG_STATE_EXIT:
2365 case FSG_STATE_TERMINATED:
2367 fsg->new_config = 0;
2368 do_set_interface(fsg, -1);
2370 do_set_config(fsg, 0); /* Free resources */
2371 spin_lock_irq(&fsg->lock);
2372 fsg->state = FSG_STATE_TERMINATED; /* Stop the thread */
2373 spin_unlock_irq(&fsg->lock);
2379 /*-------------------------------------------------------------------------*/
/*
 * Kernel-thread entry point. Loops until FSG_STATE_TERMINATED:
 * handle exceptions/signals first, otherwise run one Bulk-only
 * transaction — get_next_command() (command phase), do_scsi_command()
 * + finish_reply() (data phase), send_status() (status phase) — with
 * the state advanced under fsg->lock between phases unless an
 * exception intervened. On exit, closes backing files if still
 * registered and signals thread_notifier for unbind/cleanup.
 */
2381 static int fsg_main_thread(void *fsg_)
2383 struct fsg_dev *fsg = fsg_;
2385 /* Allow the thread to be killed by a signal, but set the signal mask
2386 * to block everything but INT, TERM, KILL, and USR1. */
2387 allow_signal(SIGINT);
2388 allow_signal(SIGTERM);
2389 allow_signal(SIGKILL);
2390 allow_signal(SIGUSR1);
2392 /* Allow the thread to be frozen */
2395 /* Arrange for userspace references to be interpreted as kernel
2396 * pointers. That way we can pass a kernel pointer to a routine
2397 * that expects a __user pointer and it will work okay. */
2401 while (fsg->state != FSG_STATE_TERMINATED) {
2402 if (exception_in_progress(fsg) || signal_pending(current)) {
2403 handle_exception(fsg);
2407 if (!fsg->running) {
2412 if (get_next_command(fsg))
2415 spin_lock_irq(&fsg->lock);
2416 if (!exception_in_progress(fsg))
2417 fsg->state = FSG_STATE_DATA_PHASE;
2418 spin_unlock_irq(&fsg->lock);
2420 if (do_scsi_command(fsg) || finish_reply(fsg))
2423 spin_lock_irq(&fsg->lock);
2424 if (!exception_in_progress(fsg))
2425 fsg->state = FSG_STATE_STATUS_PHASE;
2426 spin_unlock_irq(&fsg->lock);
2428 if (send_status(fsg))
2431 spin_lock_irq(&fsg->lock);
2432 if (!exception_in_progress(fsg))
2433 fsg->state = FSG_STATE_IDLE;
2434 spin_unlock_irq(&fsg->lock);
2437 spin_lock_irq(&fsg->lock);
2438 fsg->thread_task = NULL;
2439 spin_unlock_irq(&fsg->lock);
2441 /* In case we are exiting because of a signal, unregister the
2442 * gadget driver and close the backing file. */
2443 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
2444 close_all_backing_files(fsg);
2446 /* Let the unbind and cleanup routines know the thread has exited */
2447 complete_and_exit(&fsg->thread_notifier, 0);
2451 /*-------------------------------------------------------------------------*/
2453 /* If the next two routines are called while the gadget is registered,
2454 * the caller must own fsg->filesem for writing. */
/*
 * Open `filename` as the LUN's backing store: try read/write first,
 * fall back to read-only on -EROFS. Accepts regular files and block
 * devices; validates readability/writability via f_op, computes the
 * size in 512-byte sectors (must be non-zero), records filp/size/
 * num_sectors on the LUN, and re-evaluates the wake lock.
 */
2456 static int open_backing_file(struct fsg_dev *fsg, struct lun *curlun,
2457 const char *filename)
2460 struct file *filp = NULL;
2462 struct inode *inode = NULL;
2466 /* R/W if we can, R/O if we must */
2469 filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
2470 if (-EROFS == PTR_ERR(filp))
2474 filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
2476 LINFO(curlun, "unable to open backing file: %s\n", filename);
2477 return PTR_ERR(filp);
2480 if (!(filp->f_mode & FMODE_WRITE))
2483 if (filp->f_path.dentry)
2484 inode = filp->f_path.dentry->d_inode;
2485 if (inode && S_ISBLK(inode->i_mode)) {
2486 if (bdev_read_only(inode->i_bdev))
2488 } else if (!inode || !S_ISREG(inode->i_mode)) {
2489 LINFO(curlun, "invalid file type: %s\n", filename);
2493 /* If we can't read the file, it's no good.
2494 * If we can't write the file, use it read-only. */
2495 if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
2496 LINFO(curlun, "file not readable: %s\n", filename);
2499 if (!(filp->f_op->write || filp->f_op->aio_write))
2502 size = i_size_read(inode->i_mapping->host);
2504 LINFO(curlun, "unable to find file size: %s\n", filename);
2508 num_sectors = size >> 9; /* File size in 512-byte sectors */
2509 if (num_sectors == 0) {
2510 LINFO(curlun, "file too small: %s\n", filename);
2517 curlun->filp = filp;
2518 curlun->file_length = size;
2519 curlun->num_sectors = num_sectors;
2520 LDBG(curlun, "open backing file: %s size: %lld num_sectors: %lld\n",
2521 filename, size, num_sectors);
2523 adjust_wake_lock(fsg);
/* Error path: release the file we opened */
2526 filp_close(filp, current->files);
/*
 * Close the LUN's backing file: fsync its pages to disk first (the
 * "ugly hack" noted below), clear curlun->filp, and re-evaluate the
 * wake lock. Caller must hold fsg->filesem for writing when the
 * gadget is registered (see the comment above open_backing_file).
 */
2531 static void close_backing_file(struct fsg_dev *fsg, struct lun *curlun)
2537 * XXX: San: Ugly hack here added to ensure that
2538 * our pages get synced to disk.
2539 * Also drop caches here just to be extra-safe
2541 rc = vfs_fsync(curlun->filp, 1);
2543 printk(KERN_ERR "ums: Error syncing data (%d)\n", rc);
2544 /* drop_pagecache and drop_slab are no longer available */
2545 /* drop_pagecache(); */
2548 LDBG(curlun, "close backing file\n");
2550 curlun->filp = NULL;
2551 adjust_wake_lock(fsg);
/* Close the backing file of every LUN on this device. */
2555 static void close_all_backing_files(struct fsg_dev *fsg)
2559 for (i = 0; i < fsg->nluns; ++i)
2560 close_backing_file(fsg, &fsg->luns[i]);
/*
 * sysfs "file" attribute read: return the full pathname of the LUN's
 * backing file (newline-terminated), or empty output when no file is
 * open. Takes fsg->filesem for read around the d_path() lookup.
 */
2563 static ssize_t show_file(struct device *dev, struct device_attribute *attr,
2566 struct lun *curlun = dev_to_lun(dev);
2567 struct fsg_dev *fsg = dev_get_drvdata(dev);
2571 down_read(&fsg->filesem);
2572 if (backing_file_is_open(curlun)) { /* Get the complete pathname */
2573 p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
2578 memmove(buf, p, rc);
2579 buf[rc] = '\n'; /* Add a newline */
2582 } else { /* No file, return 0 bytes */
2586 up_read(&fsg->filesem);
/*
 * sysfs "file" attribute write: change the LUN's backing file.
 * Refuses while the host has locked the medium (prevent_medium_removal
 * with a file open). Strips a trailing newline, ejects the current
 * medium under filesem (raising MEDIUM NOT PRESENT), and, when a
 * non-empty path was written, opens the new file and raises a
 * NOT READY TO READY TRANSITION unit attention on success.
 */
2590 static ssize_t store_file(struct device *dev, struct device_attribute *attr,
2591 const char *buf, size_t count)
2593 struct lun *curlun = dev_to_lun(dev);
2594 struct fsg_dev *fsg = dev_get_drvdata(dev);
2597 DBG(fsg, "store_file: \"%s\"\n", buf);
2599 /* disabled because we need to allow closing the backing file if the media was removed */
2600 if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
2601 LDBG(curlun, "eject attempt prevented\n");
2602 return -EBUSY; /* "Door is locked" */
2606 /* Remove a trailing newline */
2607 if (count > 0 && buf[count-1] == '\n')
2608 ((char *) buf)[count-1] = 0;
2610 /* Eject current medium */
2611 down_write(&fsg->filesem);
2612 if (backing_file_is_open(curlun)) {
2613 close_backing_file(fsg, curlun);
2614 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2617 /* Load new medium */
2618 if (count > 0 && buf[0]) {
2619 rc = open_backing_file(fsg, curlun, buf);
2621 curlun->unit_attention_data =
2622 SS_NOT_READY_TO_READY_TRANSITION;
2624 up_write(&fsg->filesem);
2625 return (rc < 0 ? rc : count);
/*
 * Per-LUN "file" sysfs attribute (show_file/store_file).  Declared 0444
 * (read-only) here, but fsg_function_bind() overwrites the mode with
 * 0644 (dev_attr_file.attr.mode = 0644) before the attribute file is
 * created, so store_file() is reachable at runtime.
 */
2629 static DEVICE_ATTR(file, 0444, show_file, store_file);
2631 /*-------------------------------------------------------------------------*/
/*
 * fsg_release() - kref destructor for the fsg_dev, invoked when the last
 * reference is dropped via kref_put().
 * NOTE(review): the body is partially elided in this chunk — the actual
 * freeing statement is not visible here.
 */
2633 static void fsg_release(struct kref *ref)
2635 struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref);
/*
 * lun_release() - device-core release callback for a LUN device.
 * Each registered LUN holds a reference on the fsg_dev (taken with
 * kref_get() in fsg_function_bind()); drop it here.
 */
2641 static void lun_release(struct device *dev)
2643 struct fsg_dev *fsg = dev_get_drvdata(dev);
2645 kref_put(&fsg->ref, fsg_release);
2649 /*-------------------------------------------------------------------------*/
/*
 * fsg_alloc() - allocate and minimally initialize a zeroed fsg_dev:
 * spinlock, filesem rwsem, refcount, and the thread-exit completion.
 * NOTE(review): the allocation-failure check and return statement are
 * not visible in this decimated chunk.
 */
2651 static int __init fsg_alloc(void)
2653 struct fsg_dev *fsg;
2655 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
2658 spin_lock_init(&fsg->lock);
2659 init_rwsem(&fsg->filesem);
2660 kref_init(&fsg->ref);
2661 init_completion(&fsg->thread_notifier);
/* Switch-class callback: report the switch name (DRIVER_NAME). */
2667 static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
2669 return sprintf(buf, "%s\n", DRIVER_NAME);
/* Switch-class callback: report "online" when the gadget has an active
 * configuration (fsg->config non-zero), otherwise "offline". */
2672 static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
2674 struct fsg_dev *fsg = container_of(sdev, struct fsg_dev, sdev);
2675 return sprintf(buf, "%s\n", (fsg->config ? "online" : "offline"));
/*
 * fsg_function_unbind() - tear down the function: unregister sysfs/LUN
 * devices, stop the worker thread, free bulk buffers, and unregister
 * the switch device.  Also called from the fsg_function_bind() error
 * path (with state forced to FSG_STATE_TERMINATED so the thread-exit
 * wait is skipped).
 */
2679 fsg_function_unbind(struct usb_configuration *c, struct usb_function *f)
2681 struct fsg_dev *fsg = func_to_dev(f);
2685 DBG(fsg, "fsg_function_unbind\n");
2686 clear_bit(REGISTERED, &fsg->atomic_bitflags);
2688 /* Unregister the sysfs attribute files and the LUNs */
2689 for (i = 0; i < fsg->nluns; ++i) {
2690 curlun = &fsg->luns[i];
/* Only LUNs that completed registration in bind() are torn down. */
2691 if (curlun->registered) {
2692 device_remove_file(&curlun->dev, &dev_attr_file);
2693 device_unregister(&curlun->dev);
2694 curlun->registered = 0;
2698 /* If the thread isn't already dead, tell it to exit now */
2699 if (fsg->state != FSG_STATE_TERMINATED) {
2700 raise_exception(fsg, FSG_STATE_EXIT);
2701 wait_for_completion(&fsg->thread_notifier);
2703 /* The cleanup routine waits for this completion also */
2704 complete(&fsg->thread_notifier);
2707 /* Free the data buffers */
2708 for (i = 0; i < NUM_BUFFERS; ++i)
2709 kfree(fsg->buffhds[i].buf);
2710 switch_dev_unregister(&fsg->sdev);
/*
 * fsg_function_bind() - set up the mass-storage function when the
 * composite framework binds it to a configuration: create LUN devices
 * and their sysfs "file" attributes, claim bulk endpoints, allocate the
 * data buffers, and start the worker thread.
 * NOTE(review): chunk is decimated — error-path gotos, NULL checks, and
 * several assignments between the numbered lines are not visible here.
 */
2714 fsg_function_bind(struct usb_configuration *c, struct usb_function *f)
2716 struct usb_composite_dev *cdev = c->cdev;
2717 struct fsg_dev *fsg = func_to_dev(f);
2726 DBG(fsg, "fsg_function_bind\n");
/* Widen the attribute mode from its declared 0444 so store_file() (medium
 * swap via sysfs) is permitted.  Must happen before device_create_file(). */
2728 dev_attr_file.attr.mode = 0644;
2730 /* Find out how many LUNs there should be */
2735 ERROR(fsg, "invalid number of LUNs: %d\n", i);
2740 /* Create the LUNs, open their backing files, and register the
2741 * LUN devices in sysfs. */
2742 fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL);
2749 for (i = 0; i < fsg->nluns; ++i) {
2750 curlun = &fsg->luns[i];
/* lun_release() drops the fsg ref taken with kref_get() below. */
2752 curlun->dev.release = lun_release;
2753 /* use "usb_mass_storage" platform device as parent if available */
2755 curlun->dev.parent = &fsg->pdev->dev;
2757 curlun->dev.parent = &cdev->gadget->dev;
2758 dev_set_drvdata(&curlun->dev, fsg);
2759 snprintf(curlun->dev.bus_id, BUS_ID_SIZE,
2762 rc = device_register(&curlun->dev);
2764 INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
2767 rc = device_create_file(&curlun->dev, &dev_attr_file);
2769 ERROR(fsg, "device_create_file failed: %d\n", rc);
2770 device_unregister(&curlun->dev);
/* Mark the LUN fully registered so unbind knows to tear it down. */
2773 curlun->registered = 1;
2774 kref_get(&fsg->ref);
2777 /* allocate interface ID(s) */
2778 id = usb_interface_id(c, f);
2781 intf_desc.bInterfaceNumber = id;
/* Claim the two bulk endpoints from the gadget controller. */
2783 ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
2786 ep->driver_data = fsg; /* claim the endpoint */
2789 ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
2792 ep->driver_data = fsg; /* claim the endpoint */
2797 if (gadget_is_dualspeed(cdev->gadget)) {
2798 /* Assume endpoint addresses are the same for both speeds */
2799 hs_bulk_in_desc.bEndpointAddress =
2800 fs_bulk_in_desc.bEndpointAddress;
2801 hs_bulk_out_desc.bEndpointAddress =
2802 fs_bulk_out_desc.bEndpointAddress;
2804 f->hs_descriptors = hs_function;
2807 /* Allocate the data buffers */
2808 for (i = 0; i < NUM_BUFFERS; ++i) {
2809 struct fsg_buffhd *bh = &fsg->buffhds[i];
2811 /* Allocate for the bulk-in endpoint. We assume that
2812 * the buffer will also work with the bulk-out (and
2813 * interrupt-in) endpoint. */
2814 bh->buf = kmalloc(fsg->buf_size, GFP_KERNEL);
/* Close the circular buffhd list. */
2819 fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
2821 fsg->thread_task = kthread_create(fsg_main_thread, fsg,
2823 if (IS_ERR(fsg->thread_task)) {
2824 rc = PTR_ERR(fsg->thread_task);
2825 ERROR(fsg, "kthread_create failed: %d\n", rc);
2829 INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
/* Log each LUN's ro flag and backing file path (best effort: pathbuf
 * allocation failure only degrades the log output). */
2831 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2832 for (i = 0; i < fsg->nluns; ++i) {
2833 curlun = &fsg->luns[i];
2834 if (backing_file_is_open(curlun)) {
2837 p = d_path(&curlun->filp->f_path,
2842 LINFO(curlun, "ro=%d, file: %s\n",
2843 curlun->ro, (p ? p : "(error)"));
2848 set_bit(REGISTERED, &fsg->atomic_bitflags);
2850 /* Tell the thread to start working */
2851 wake_up_process(fsg->thread_task);
2855 ERROR(fsg, "unable to autoconfigure all endpoints\n");
/* Common error path: mark the (never-started) thread dead, then reuse
 * unbind() for cleanup and close any files opened above. */
2859 DBG(fsg, "fsg_function_bind failed: %d\n", rc);
2860 fsg->state = FSG_STATE_TERMINATED; /* The thread is dead */
2861 fsg_function_unbind(c, f);
2862 close_all_backing_files(fsg);
/*
 * fsg_function_set_alt() - host selected our interface/alt setting:
 * record that a config is active and signal the worker thread to
 * (re)configure via the CONFIG_CHANGE exception.
 */
2866 static int fsg_function_set_alt(struct usb_function *f,
2867 unsigned intf, unsigned alt)
2869 struct fsg_dev *fsg = func_to_dev(f);
2870 DBG(fsg, "fsg_function_set_alt intf: %d alt: %d\n", intf, alt);
2871 fsg->new_config = 1;
2872 do_set_interface(fsg, 0);
2873 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
/*
 * fsg_function_disable() - host deconfigured the gadget: tear down the
 * interface (altsetting -1) if one was active, clear the config flag,
 * and notify the worker thread via the CONFIG_CHANGE exception.
 */
2877 static void fsg_function_disable(struct usb_function *f)
2879 struct fsg_dev *fsg = func_to_dev(f);
2880 DBG(fsg, "fsg_function_disable\n");
2881 if (fsg->new_config)
2882 do_set_interface(fsg, -1);
2883 fsg->new_config = 0;
2884 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
/*
 * fsg_probe() - platform-driver probe for "usb_mass_storage": copy
 * vendor/product/release IDs and the LUN count from platform data into
 * the global fsg_dev (the_fsg).
 * NOTE(review): the NULL checks guarding each pdata field and the return
 * statement are not visible in this decimated chunk.
 */
2887 static int __init fsg_probe(struct platform_device *pdev)
2889 struct usb_mass_storage_platform_data *pdata = pdev->dev.platform_data;
2890 struct fsg_dev *fsg = the_fsg;
2893 printk(KERN_INFO "fsg_probe pdata: %p\n", pdata);
2897 fsg->vendor = pdata->vendor;
2900 fsg->product = pdata->product;
2903 fsg->release = pdata->release;
2904 fsg->nluns = pdata->nluns;
/* Platform driver matched by name against the board's "usb_mass_storage"
 * platform device; probe (fsg_probe) pulls in per-board LUN/ID data. */
2910 static struct platform_driver fsg_platform_driver = {
2911 .driver = { .name = "usb_mass_storage", },
/*
 * mass_storage_bind_config() - entry point called by the Android
 * composite layer: allocate/init the fsg_dev, register the switch
 * device and platform driver, init the wake lock, fill in the
 * usb_function ops, and add the function to the configuration.
 * Unwinds in reverse order via the goto labels on failure.
 * NOTE(review): chunk is decimated — the fsg allocation/assignment
 * lines between the printk and spin_lock_init are not visible.
 */
2915 int mass_storage_bind_config(struct usb_configuration *c)
2918 struct fsg_dev *fsg;
2920 printk(KERN_INFO "mass_storage_bind_config\n");
2926 spin_lock_init(&fsg->lock);
2927 init_rwsem(&fsg->filesem);
2928 kref_init(&fsg->ref);
2929 init_completion(&fsg->thread_notifier);
2931 the_fsg->buf_size = BULK_BUFFER_SIZE;
/* Switch device reports online/offline to Android userspace. */
2932 the_fsg->sdev.name = DRIVER_NAME;
2933 the_fsg->sdev.print_name = print_switch_name;
2934 the_fsg->sdev.print_state = print_switch_state;
2935 rc = switch_dev_register(&the_fsg->sdev);
2937 goto err_switch_dev_register;
2939 rc = platform_driver_register(&fsg_platform_driver);
2941 goto err_platform_driver_register;
/* Wake lock keeps the device from suspending while the host is doing
 * mass-storage I/O (taken/released elsewhere via adjust_wake_lock()). */
2943 wake_lock_init(&the_fsg->wake_lock, WAKE_LOCK_SUSPEND,
2944 "usb_mass_storage");
2946 fsg->cdev = c->cdev;
2947 fsg->function.name = shortname;
2948 fsg->function.descriptors = fs_function;
2949 fsg->function.bind = fsg_function_bind;
2950 fsg->function.unbind = fsg_function_unbind;
2951 fsg->function.setup = fsg_function_setup;
2952 fsg->function.set_alt = fsg_function_set_alt;
2953 fsg->function.disable = fsg_function_disable;
2955 rc = usb_add_function(c, &fsg->function);
2957 goto err_usb_add_function;
/* Error unwinding: each label undoes the step registered just above it. */
2962 err_usb_add_function:
2963 wake_lock_destroy(&the_fsg->wake_lock);
2964 platform_driver_unregister(&fsg_platform_driver);
2965 err_platform_driver_register:
2966 switch_dev_unregister(&the_fsg->sdev);
2967 err_switch_dev_register:
2968 kref_put(&the_fsg->ref, fsg_release);
/* Android composite-framework registration record: associates the
 * "usb_mass_storage" function name with its config-bind callback. */
2973 static struct android_usb_function mass_storage_function = {
2974 .name = "usb_mass_storage",
2975 .bind_config = mass_storage_bind_config,
2978 static int __init init(void)
2980 printk(KERN_INFO "f_mass_storage init\n");
2981 android_register_function(&mass_storage_function);