2 * Gadget Function Driver for MTP
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Mike Lockwood <lockwood@android.com>
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
19 /* #define VERBOSE_DEBUG */
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/poll.h>
24 #include <linux/delay.h>
25 #include <linux/wait.h>
26 #include <linux/err.h>
27 #include <linux/interrupt.h>
29 #include <linux/seq_file.h>
30 #include <linux/debugfs.h>
31 #include <linux/types.h>
32 #include <linux/file.h>
33 #include <linux/device.h>
34 #include <linux/miscdevice.h>
36 #include <linux/usb.h>
37 #include <linux/usb_usual.h>
38 #include <linux/usb/ch9.h>
39 #include <linux/usb/f_mtp.h>
40 #include <linux/configfs.h>
41 #include <linux/usb/composite.h>
/* Buffer sizing and protocol constants for the MTP gadget function.
 * NOTE(review): embedded numbering shows some original lines are missing
 * from this extraction; only the visible definitions are documented.
 */
45 #define MTP_RX_BUFFER_INIT_SIZE 1048576
46 #define MTP_TX_BUFFER_INIT_SIZE 1048576
47 #define MTP_BULK_BUFFER_SIZE 16384
48 #define INTR_BUFFER_SIZE 28
49 #define MAX_INST_NAME_LEN 40
50 #define MTP_MAX_FILE_SIZE 0xFFFFFFFFL
/* index of the "MTP" interface string in mtp_string_defs[] */
53 #define INTERFACE_STRING_INDEX 0
55 /* values for mtp_dev.state */
56 #define STATE_OFFLINE 0 /* initial state, disconnected */
57 #define STATE_READY 1 /* ready for userspace calls */
58 #define STATE_BUSY 2 /* processing userspace calls */
59 #define STATE_CANCELED 3 /* transaction canceled by host */
60 #define STATE_ERROR 4 /* error from completion routine */
62 /* number of tx and rx requests to allocate */
63 #define MTP_TX_REQ_MAX 8
65 #define INTR_REQ_MAX 5
67 /* ID for Microsoft MTP OS String */
68 #define MTP_OS_STRING_ID 0xEE
70 /* MTP class requests */
71 #define MTP_REQ_CANCEL 0x64
72 #define MTP_REQ_GET_EXT_EVENT_DATA 0x65
73 #define MTP_REQ_RESET 0x66
74 #define MTP_REQ_GET_DEVICE_STATUS 0x67
76 /* constants for device status */
77 #define MTP_RESPONSE_OK 0x2001
78 #define MTP_RESPONSE_DEVICE_BUSY 0x2019
79 #define DRIVER_NAME "mtp"
/* size of the perf[] debug ring buffers (vfs read/write timing samples) */
81 #define MAX_ITERATION 100
/* RX/TX buffer length and TX request count, tunable at runtime as
 * writable module parameters; validated/clamped in
 * mtp_create_bulk_endpoints() if allocation fails.
 */
83 unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
84 module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);
86 unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
87 module_param(mtp_tx_req_len, uint, S_IRUGO | S_IWUSR);
89 unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
90 module_param(mtp_tx_reqs, uint, S_IRUGO | S_IWUSR);
/* name of the misc char device node: "mtp_usb" */
92 static const char mtp_shortname[] = DRIVER_NAME "_usb";
/* Per-function device state (interior of struct mtp_dev; the struct's
 * opening line and several members — e.g. ep_in, lock, rx_done, the
 * excl atomics and the perf struct header — are missing from this
 * extraction; see the embedded numbering gaps).
 */
95 struct usb_function function;
96 struct usb_composite_dev *cdev;
/* bulk-out and interrupt-in endpoints claimed at bind time */
100 struct usb_ep *ep_out;
101 struct usb_ep *ep_intr;
105 /* synchronize access to our device file */
107 /* to enforce only one ioctl at a time */
/* idle (available) TX and interrupt requests, guarded by dev->lock */
110 struct list_head tx_idle;
111 struct list_head intr_idle;
/* waiters for RX completion, free TX requests, and free intr requests */
113 wait_queue_head_t read_wq;
114 wait_queue_head_t write_wq;
115 wait_queue_head_t intr_wq;
116 struct usb_request *rx_req[RX_REQ_MAX];
119 /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
120 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
122 struct workqueue_struct *wq;
123 struct work_struct send_file_work;
124 struct work_struct receive_file_work;
/* parameters handed from the ioctl path to the work items */
125 struct file *xfer_file;
126 loff_t xfer_file_offset;
127 int64_t xfer_file_length;
128 unsigned xfer_send_header;
129 uint16_t xfer_command;
130 uint32_t xfer_transaction_id;
/* perf[] ring of vfs_read/vfs_write byte counts and timings for debugfs */
133 unsigned long vfs_rbytes;
134 unsigned long vfs_wbytes;
137 } perf[MAX_ITERATION];
138 unsigned dbg_read_index;
139 unsigned dbg_write_index;
/* serializes RX request usage against offline teardown */
141 struct mutex read_mutex;
/* Interface descriptors: MTP presents as vendor-specific; PTP presents
 * as the standard USB Still Image Capture class (class 6, subclass 1,
 * protocol 1). NOTE(review): .bNumEndpoints and the closing braces are
 * missing from this extraction.
 */
144 static struct usb_interface_descriptor mtp_interface_desc = {
145 .bLength = USB_DT_INTERFACE_SIZE,
146 .bDescriptorType = USB_DT_INTERFACE,
147 .bInterfaceNumber = 0,
149 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
150 .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
151 .bInterfaceProtocol = 0,
154 static struct usb_interface_descriptor ptp_interface_desc = {
155 .bLength = USB_DT_INTERFACE_SIZE,
156 .bDescriptorType = USB_DT_INTERFACE,
157 .bInterfaceNumber = 0,
159 .bInterfaceClass = USB_CLASS_STILL_IMAGE,
160 .bInterfaceSubClass = 1,
161 .bInterfaceProtocol = 1,
/* Endpoint descriptors for super-speed (1024-byte bulk max packet),
 * high-speed (512), full-speed (max packet left for the controller
 * driver to fill in), plus the interrupt-in endpoint and the
 * SuperSpeed endpoint companion descriptors.
 */
164 static struct usb_endpoint_descriptor mtp_ss_in_desc = {
165 .bLength = USB_DT_ENDPOINT_SIZE,
166 .bDescriptorType = USB_DT_ENDPOINT,
167 .bEndpointAddress = USB_DIR_IN,
168 .bmAttributes = USB_ENDPOINT_XFER_BULK,
169 .wMaxPacketSize = cpu_to_le16(1024),
172 static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
173 .bLength = sizeof(mtp_ss_in_comp_desc),
174 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
176 /* the following 2 values can be tweaked if necessary */
178 /* .bmAttributes = 0, */
182 static struct usb_endpoint_descriptor mtp_ss_out_desc = {
183 .bLength = USB_DT_ENDPOINT_SIZE,
184 .bDescriptorType = USB_DT_ENDPOINT,
185 .bEndpointAddress = USB_DIR_OUT,
186 .bmAttributes = USB_ENDPOINT_XFER_BULK,
187 .wMaxPacketSize = cpu_to_le16(1024),
190 static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
191 .bLength = sizeof(mtp_ss_out_comp_desc),
192 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
194 /* the following 2 values can be tweaked if necessary */
196 /* .bmAttributes = 0, */
199 static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
200 .bLength = USB_DT_ENDPOINT_SIZE,
201 .bDescriptorType = USB_DT_ENDPOINT,
202 .bEndpointAddress = USB_DIR_IN,
203 .bmAttributes = USB_ENDPOINT_XFER_BULK,
204 .wMaxPacketSize = __constant_cpu_to_le16(512),
207 static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
208 .bLength = USB_DT_ENDPOINT_SIZE,
209 .bDescriptorType = USB_DT_ENDPOINT,
210 .bEndpointAddress = USB_DIR_OUT,
211 .bmAttributes = USB_ENDPOINT_XFER_BULK,
212 .wMaxPacketSize = __constant_cpu_to_le16(512),
215 static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
216 .bLength = USB_DT_ENDPOINT_SIZE,
217 .bDescriptorType = USB_DT_ENDPOINT,
218 .bEndpointAddress = USB_DIR_IN,
219 .bmAttributes = USB_ENDPOINT_XFER_BULK,
222 static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
223 .bLength = USB_DT_ENDPOINT_SIZE,
224 .bDescriptorType = USB_DT_ENDPOINT,
225 .bEndpointAddress = USB_DIR_OUT,
226 .bmAttributes = USB_ENDPOINT_XFER_BULK,
/* interrupt endpoint used to deliver MTP events to the host */
229 static struct usb_endpoint_descriptor mtp_intr_desc = {
230 .bLength = USB_DT_ENDPOINT_SIZE,
231 .bDescriptorType = USB_DT_ENDPOINT,
232 .bEndpointAddress = USB_DIR_IN,
233 .bmAttributes = USB_ENDPOINT_XFER_INT,
234 .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
238 static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
239 .bLength = sizeof(mtp_intr_ss_comp_desc),
240 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
242 /* the following 3 values can be tweaked if necessary */
243 /* .bMaxBurst = 0, */
244 /* .bmAttributes = 0, */
245 .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
/* Per-speed descriptor lists for the MTP and PTP configurations; the
 * interface descriptor is the only element that differs between the
 * two protocol variants.
 */
248 static struct usb_descriptor_header *fs_mtp_descs[] = {
249 (struct usb_descriptor_header *) &mtp_interface_desc,
250 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
251 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
252 (struct usb_descriptor_header *) &mtp_intr_desc,
256 static struct usb_descriptor_header *hs_mtp_descs[] = {
257 (struct usb_descriptor_header *) &mtp_interface_desc,
258 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
259 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
260 (struct usb_descriptor_header *) &mtp_intr_desc,
264 static struct usb_descriptor_header *ss_mtp_descs[] = {
265 (struct usb_descriptor_header *) &mtp_interface_desc,
266 (struct usb_descriptor_header *) &mtp_ss_in_desc,
267 (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
268 (struct usb_descriptor_header *) &mtp_ss_out_desc,
269 (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
270 (struct usb_descriptor_header *) &mtp_intr_desc,
271 (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
275 static struct usb_descriptor_header *fs_ptp_descs[] = {
276 (struct usb_descriptor_header *) &ptp_interface_desc,
277 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
278 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
279 (struct usb_descriptor_header *) &mtp_intr_desc,
283 static struct usb_descriptor_header *hs_ptp_descs[] = {
284 (struct usb_descriptor_header *) &ptp_interface_desc,
285 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
286 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
287 (struct usb_descriptor_header *) &mtp_intr_desc,
291 static struct usb_descriptor_header *ss_ptp_descs[] = {
292 (struct usb_descriptor_header *) &ptp_interface_desc,
293 (struct usb_descriptor_header *) &mtp_ss_in_desc,
294 (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
295 (struct usb_descriptor_header *) &mtp_ss_out_desc,
296 (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
297 (struct usb_descriptor_header *) &mtp_intr_desc,
298 (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
/* USB string table plus the Microsoft OS string descriptor (string
 * index 0xEE) that Windows queries to discover MS OS descriptor
 * support. NOTE(review): the byte between the length and the signature
 * (USB_DT_STRING) and the trailing vendor code bytes are missing from
 * this extraction.
 */
302 static struct usb_string mtp_string_defs[] = {
303 /* Naming interface "MTP" so libmtp will recognize us */
304 [INTERFACE_STRING_INDEX].s = "MTP",
305 { }, /* end of list */
308 static struct usb_gadget_strings mtp_string_table = {
309 .language = 0x0409, /* en-US */
310 .strings = mtp_string_defs,
313 static struct usb_gadget_strings *mtp_strings[] = {
318 /* Microsoft MTP OS String */
319 static u8 mtp_os_string[] = {
320 18, /* sizeof(mtp_os_string) */
322 /* Signature field: "MSFT100" */
323 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
330 /* Microsoft Extended Configuration Descriptor Header Section */
331 struct mtp_ext_config_desc_header {
/* NOTE(review): header members (dwLength, bcdVersion, wIndex, bCount)
 * are missing from this extraction; the initializers below reference
 * them.
 */
339 /* Microsoft Extended Configuration Descriptor Function Section */
340 struct mtp_ext_config_desc_function {
341 __u8 bFirstInterfaceNumber;
342 __u8 bInterfaceCount;
343 __u8 compatibleID[8];
344 __u8 subCompatibleID[8];
348 /* MTP Extended Configuration Descriptor */
349 struct ext_mtp_desc {
350 struct mtp_ext_config_desc_header header;
351 struct mtp_ext_config_desc_function function;
/* compat-ID descriptors that tell Windows to bind its MTP/PTP driver */
354 struct ext_mtp_desc mtp_ext_config_desc = {
356 .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
357 .bcdVersion = __constant_cpu_to_le16(0x0100),
358 .wIndex = __constant_cpu_to_le16(4),
362 .bFirstInterfaceNumber = 0,
363 .bInterfaceCount = 1,
364 .compatibleID = { 'M', 'T', 'P' },
368 struct ext_mtp_desc ptp_ext_config_desc = {
370 .dwLength = cpu_to_le32(sizeof(mtp_ext_config_desc)),
371 .bcdVersion = cpu_to_le16(0x0100),
372 .wIndex = cpu_to_le16(4),
373 .bCount = cpu_to_le16(1),
376 .bFirstInterfaceNumber = 0,
377 .bInterfaceCount = 1,
378 .compatibleID = { 'P', 'T', 'P' },
/* Wire format of the MTP_REQ_GET_DEVICE_STATUS reply (members missing
 * from this extraction; mtp_ctrlrequest() fills wLength and wCode).
 */
382 struct mtp_device_status {
/* 12-byte MTP container header prepended to data packets by
 * send_file_work() when xfer_send_header is set.
 */
387 struct mtp_data_header {
388 /* length of packet, including this header */
390 /* container type (2 for data packet) */
392 /* MTP command code */
394 /* MTP transaction ID */
395 __le32 transaction_id;
/* configfs instance wrapper carrying the MS OS descriptor state */
398 struct mtp_instance {
399 struct usb_function_instance func_inst;
402 char mtp_ext_compat_id[16];
403 struct usb_os_desc mtp_os_desc;
406 /* temporary variable used between mtp_open() and mtp_gadget_bind() */
407 static struct mtp_dev *_mtp_dev;
/* map a usb_function back to its containing mtp_dev */
409 static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
411 return container_of(f, struct mtp_dev, function);
/* allocate a usb_request plus a kmalloc'd transfer buffer of
 * buffer_size bytes; frees the request again if the buffer allocation
 * fails (error-path lines missing from this extraction).
 */
414 static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
416 struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
421 /* now allocate buffers for the requests */
422 req->buf = kmalloc(buffer_size, GFP_KERNEL);
424 usb_ep_free_request(ep, req);
/* release a request created by mtp_request_new() (buffer kfree line
 * missing from this extraction).
 */
431 static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
435 usb_ep_free_request(ep, req);
/* atomic try-lock: first caller to raise the counter to 1 wins;
 * everyone else presumably gets a busy return (missing lines).
 */
439 static inline int mtp_lock(atomic_t *excl)
441 if (atomic_inc_return(excl) == 1) {
449 static inline void mtp_unlock(atomic_t *excl)
454 /* add a request to the tail of a list */
/* Both helpers guard the shared idle lists with dev->lock since they
 * are called from interrupt-context completion handlers as well as
 * process context.
 */
455 static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
456 struct usb_request *req)
460 spin_lock_irqsave(&dev->lock, flags);
461 list_add_tail(&req->list, head);
462 spin_unlock_irqrestore(&dev->lock, flags);
465 /* remove a request from the head of a list */
/* returns NULL when the list is empty (the early-out body is missing
 * from this extraction).
 */
466 static struct usb_request
467 *mtp_req_get(struct mtp_dev *dev, struct list_head *head)
470 struct usb_request *req;
472 spin_lock_irqsave(&dev->lock, flags);
473 if (list_empty(head)) {
476 req = list_first_entry(head, struct usb_request, list);
477 list_del(&req->list);
479 spin_unlock_irqrestore(&dev->lock, flags);
/* Endpoint completion callbacks (interrupt context). Each flags an
 * error state unless the device is already offline, returns the
 * request to its idle pool (TX/intr), and wakes the matching waiter.
 */
483 static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
485 struct mtp_dev *dev = _mtp_dev;
487 if (req->status != 0 && dev->state != STATE_OFFLINE)
488 dev->state = STATE_ERROR;
490 mtp_req_put(dev, &dev->tx_idle, req);
492 wake_up(&dev->write_wq);
/* bulk-out completion: presumably sets dev->rx_done before the wakeup
 * (line missing from this extraction).
 */
495 static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
497 struct mtp_dev *dev = _mtp_dev;
500 if (req->status != 0 && dev->state != STATE_OFFLINE)
501 dev->state = STATE_ERROR;
503 wake_up(&dev->read_wq);
506 static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
508 struct mtp_dev *dev = _mtp_dev;
510 if (req->status != 0 && dev->state != STATE_OFFLINE)
511 dev->state = STATE_ERROR;
513 mtp_req_put(dev, &dev->intr_idle, req);
515 wake_up(&dev->intr_wq);
/* Autoconfigure the three endpoints (bulk-in, bulk-out, interrupt-in)
 * and preallocate their request pools. On allocation failure the
 * tunable buffer sizes are clamped back to MTP_BULK_BUFFER_SIZE and
 * allocation is retried (retry jump lines missing from this
 * extraction).
 */
518 static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
519 struct usb_endpoint_descriptor *in_desc,
520 struct usb_endpoint_descriptor *out_desc,
521 struct usb_endpoint_descriptor *intr_desc)
523 struct usb_composite_dev *cdev = dev->cdev;
524 struct usb_request *req;
528 DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
530 ep = usb_ep_autoconfig(cdev->gadget, in_desc);
532 DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
535 DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
536 ep->driver_data = dev; /* claim the endpoint */
539 ep = usb_ep_autoconfig(cdev->gadget, out_desc);
541 DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
544 DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
545 ep->driver_data = dev; /* claim the endpoint */
548 ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
550 DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
553 DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
554 ep->driver_data = dev; /* claim the endpoint */
558 /* now allocate requests for our endpoints */
559 for (i = 0; i < mtp_tx_reqs; i++) {
560 req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
/* on failure: free what was allocated, fall back to defaults, retry */
562 if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
564 while ((req = mtp_req_get(dev, &dev->tx_idle)))
565 mtp_request_free(req, dev->ep_in);
566 mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
567 mtp_tx_reqs = MTP_TX_REQ_MAX;
570 req->complete = mtp_complete_in;
571 mtp_req_put(dev, &dev->tx_idle, req);
575 * The RX buffer should be aligned to EP max packet for
576 * some controllers. At bind time, we don't know the
577 * operational speed. Hence assuming super speed max
580 if (mtp_rx_req_len % 1024)
581 mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
584 for (i = 0; i < RX_REQ_MAX; i++) {
585 req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
587 if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
589 for (--i; i >= 0; i--)
590 mtp_request_free(dev->rx_req[i], dev->ep_out);
591 mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
594 req->complete = mtp_complete_out;
595 dev->rx_req[i] = req;
597 for (i = 0; i < INTR_REQ_MAX; i++) {
598 req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
601 req->complete = mtp_complete_intr;
602 mtp_req_put(dev, &dev->intr_idle, req);
608 pr_err("mtp_bind() could not allocate requests\n");
/* read() handler for /dev/mtp_usb: blocks until online, queues one
 * bulk-out request sized to the packet-aligned count, waits for
 * completion, then copies the received payload to userspace. A host
 * cancel (STATE_CANCELED) is reported to userspace as -ECANCELED
 * (return lines missing from this extraction).
 */
612 static ssize_t mtp_read(struct file *fp, char __user *buf,
613 size_t count, loff_t *pos)
615 struct mtp_dev *dev = fp->private_data;
616 struct usb_composite_dev *cdev = dev->cdev;
617 struct usb_request *req;
618 ssize_t r = count, xfer, len;
621 DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);
623 /* we will block until we're online */
624 DBG(cdev, "mtp_read: waiting for online state\n");
625 ret = wait_event_interruptible(dev->read_wq,
626 dev->state != STATE_OFFLINE);
/* some hardware needs the transfer length aligned to the endpoint's
 * max packet size; reject reads larger than the RX buffer */
632 len = ALIGN(count, dev->ep_out->maxpacket);
633 if (len > mtp_rx_req_len)
636 spin_lock_irq(&dev->lock);
637 if (dev->state == STATE_CANCELED) {
638 /* report cancellation to userspace */
639 dev->state = STATE_READY;
640 spin_unlock_irq(&dev->lock);
643 dev->state = STATE_BUSY;
644 spin_unlock_irq(&dev->lock);
/* read_mutex guards rx_req[] against teardown while offline */
646 mutex_lock(&dev->read_mutex);
647 if (dev->state == STATE_OFFLINE) {
649 mutex_unlock(&dev->read_mutex);
653 /* queue a request */
654 req = dev->rx_req[0];
657 mutex_unlock(&dev->read_mutex);
658 ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
663 DBG(cdev, "rx %pK queue\n", req);
666 /* wait for a request to complete */
667 ret = wait_event_interruptible(dev->read_wq,
668 dev->rx_done || dev->state != STATE_BUSY);
669 if (dev->state == STATE_CANCELED) {
/* presumably only dequeued when the completion has not fired yet
 * (rx_done check missing from this extraction) */
672 usb_ep_dequeue(dev->ep_out, req);
673 spin_lock_irq(&dev->lock);
674 dev->state = STATE_CANCELED;
675 spin_unlock_irq(&dev->lock);
/* interrupted by signal: cancel the outstanding request */
680 usb_ep_dequeue(dev->ep_out, req);
683 mutex_lock(&dev->read_mutex);
684 if (dev->state == STATE_BUSY) {
685 /* If we got a 0-len packet, throw it back and try again. */
686 if (req->actual == 0)
689 DBG(cdev, "rx %pK %d\n", req, req->actual);
690 xfer = (req->actual < count) ? req->actual : count;
692 if (copy_to_user(buf, req->buf, xfer))
697 mutex_unlock(&dev->read_mutex);
699 spin_lock_irq(&dev->lock);
700 if (dev->state == STATE_CANCELED)
702 else if (dev->state != STATE_OFFLINE)
703 dev->state = STATE_READY;
704 spin_unlock_irq(&dev->lock);
706 DBG(cdev, "mtp_read returning %zd state:%d\n", r, dev->state);
/* write() handler for /dev/mtp_usb: copies userspace data into idle TX
 * requests and queues them on the bulk-in endpoint, sending a trailing
 * zero-length packet when the total is packet-aligned.
 */
710 static ssize_t mtp_write(struct file *fp, const char __user *buf,
711 size_t count, loff_t *pos)
713 struct mtp_dev *dev = fp->private_data;
714 struct usb_composite_dev *cdev = dev->cdev;
715 struct usb_request *req = 0;
721 DBG(cdev, "mtp_write(%zu) state:%d\n", count, dev->state);
723 spin_lock_irq(&dev->lock);
724 if (dev->state == STATE_CANCELED) {
725 /* report cancellation to userspace */
726 dev->state = STATE_READY;
727 spin_unlock_irq(&dev->lock);
730 if (dev->state == STATE_OFFLINE) {
731 spin_unlock_irq(&dev->lock);
734 dev->state = STATE_BUSY;
735 spin_unlock_irq(&dev->lock);
737 /* we need to send a zero length packet to signal the end of transfer
738 * if the transfer size is aligned to a packet boundary.
740 if ((count & (dev->ep_in->maxpacket - 1)) == 0)
743 while (count > 0 || sendZLP) {
744 /* so we exit after sending ZLP */
748 if (dev->state != STATE_BUSY) {
749 DBG(cdev, "mtp_write dev->error\n");
754 /* get an idle tx request to use */
756 ret = wait_event_interruptible(dev->write_wq,
757 ((req = mtp_req_get(dev, &dev->tx_idle))
758 || dev->state != STATE_BUSY));
760 DBG(cdev, "mtp_write request NULL ret:%d state:%d\n",
/* clamp each chunk to the TX buffer size */
766 if (count > mtp_tx_req_len)
767 xfer = mtp_tx_req_len;
770 if (xfer && copy_from_user(req->buf, buf, xfer)) {
776 ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
778 DBG(cdev, "mtp_write: xfer error %d\n", ret);
786 /* zero this so we don't try to free it on error exit */
/* on exit, return a still-held request to the idle list */
791 mtp_req_put(dev, &dev->tx_idle, req);
793 spin_lock_irq(&dev->lock);
794 if (dev->state == STATE_CANCELED)
796 else if (dev->state != STATE_OFFLINE)
797 dev->state = STATE_READY;
798 spin_unlock_irq(&dev->lock);
800 DBG(cdev, "mtp_write returning %zd state:%d\n", r, dev->state);
804 /* read from a local file and write to USB */
/* Work-queue handler for MTP_SEND_FILE[_WITH_HEADER]: streams
 * xfer_file_length bytes from xfer_file out over the bulk-in endpoint,
 * optionally prepending a 12-byte MTP data header, and records per-chunk
 * vfs_read timing in the perf[] debug ring. The result is left in
 * dev->xfer_result for mtp_send_receive_ioctl().
 */
805 static void send_file_work(struct work_struct *data)
807 struct mtp_dev *dev = container_of(data, struct mtp_dev,
809 struct usb_composite_dev *cdev = dev->cdev;
810 struct usb_request *req = 0;
811 struct mtp_data_header *header;
815 int xfer, ret, hdr_size;
820 /* read our parameters */
822 filp = dev->xfer_file;
823 offset = dev->xfer_file_offset;
824 count = dev->xfer_file_length;
826 DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
828 if (dev->xfer_send_header) {
829 hdr_size = sizeof(struct mtp_data_header);
835 /* we need to send a zero length packet to signal the end of transfer
836 * if the transfer size is aligned to a packet boundary.
838 if ((count & (dev->ep_in->maxpacket - 1)) == 0)
841 while (count > 0 || sendZLP) {
842 /* so we exit after sending ZLP */
846 /* get an idle tx request to use */
848 ret = wait_event_interruptible(dev->write_wq,
849 (req = mtp_req_get(dev, &dev->tx_idle))
850 || dev->state != STATE_BUSY);
851 if (dev->state == STATE_CANCELED) {
857 "send_file_work request NULL ret:%d state:%d\n",
863 if (count > mtp_tx_req_len)
864 xfer = mtp_tx_req_len;
869 /* prepend MTP data header */
870 header = (struct mtp_data_header *)req->buf;
872 * set file size with header according to
873 * MTP Specification v1.0
875 header->length = (count > MTP_MAX_FILE_SIZE) ?
876 MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
877 header->type = __cpu_to_le16(2); /* data packet */
878 header->command = __cpu_to_le16(dev->xfer_command);
879 header->transaction_id =
880 __cpu_to_le32(dev->xfer_transaction_id);
882 start_time = ktime_get();
883 ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
890 xfer = ret + hdr_size;
891 dev->perf[dev->dbg_read_index].vfs_rtime =
892 ktime_to_us(ktime_sub(ktime_get(), start_time));
893 dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
894 dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
898 ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
900 DBG(cdev, "send_file_work: xfer error %d\n", ret);
901 if (dev->state != STATE_OFFLINE)
902 dev->state = STATE_ERROR;
909 /* zero this so we don't try to free it on error exit */
914 mtp_req_put(dev, &dev->tx_idle, req);
916 DBG(cdev, "send_file_work returning %d state:%d\n", r, dev->state);
917 /* write the result */
918 dev->xfer_result = r;
922 /* read from USB and write to a local file */
/* Work-queue handler for MTP_RECEIVE_FILE: double-buffers between the
 * rx_req[] requests — one being filled by the bulk-out endpoint while
 * the previous one is written to xfer_file via vfs_write — recording
 * per-chunk timing in the perf[] debug ring. A count of 0xFFFFFFFF
 * means "read until a short/zero-length packet". Result is left in
 * dev->xfer_result.
 */
923 static void receive_file_work(struct work_struct *data)
925 struct mtp_dev *dev = container_of(data, struct mtp_dev,
927 struct usb_composite_dev *cdev = dev->cdev;
928 struct usb_request *read_req = NULL, *write_req = NULL;
932 int ret, cur_buf = 0;
936 /* read our parameters */
938 filp = dev->xfer_file;
939 offset = dev->xfer_file_offset;
940 count = dev->xfer_file_length;
942 DBG(cdev, "receive_file_work(%lld)\n", count);
943 if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
944 DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
945 count, dev->ep_out->maxpacket);
947 while (count > 0 || write_req) {
949 mutex_lock(&dev->read_mutex);
950 if (dev->state == STATE_OFFLINE) {
952 mutex_unlock(&dev->read_mutex);
955 /* queue a request */
956 read_req = dev->rx_req[cur_buf];
957 cur_buf = (cur_buf + 1) % RX_REQ_MAX;
959 /* some h/w expects size to be aligned to ep's MTU */
960 read_req->length = mtp_rx_req_len;
963 mutex_unlock(&dev->read_mutex);
964 ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
967 if (dev->state != STATE_OFFLINE)
968 dev->state = STATE_ERROR;
/* flush the previously received buffer to the file */
974 DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
975 start_time = ktime_get();
976 mutex_lock(&dev->read_mutex);
977 if (dev->state == STATE_OFFLINE) {
979 mutex_unlock(&dev->read_mutex);
982 ret = vfs_write(filp, write_req->buf, write_req->actual,
984 DBG(cdev, "vfs_write %d\n", ret);
985 if (ret != write_req->actual) {
987 mutex_unlock(&dev->read_mutex);
988 if (dev->state != STATE_OFFLINE)
989 dev->state = STATE_ERROR;
992 mutex_unlock(&dev->read_mutex);
993 dev->perf[dev->dbg_write_index].vfs_wtime =
994 ktime_to_us(ktime_sub(ktime_get(), start_time));
995 dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
996 dev->dbg_write_index =
997 (dev->dbg_write_index + 1) % MAX_ITERATION;
1002 /* wait for our last read to complete */
1003 ret = wait_event_interruptible(dev->read_wq,
1004 dev->rx_done || dev->state != STATE_BUSY);
1005 if (dev->state == STATE_CANCELED
1006 || dev->state == STATE_OFFLINE) {
1007 if (dev->state == STATE_OFFLINE)
/* presumably skipped when the completion already fired (rx_done
 * check missing from this extraction) */
1012 usb_ep_dequeue(dev->ep_out, read_req);
1016 mutex_lock(&dev->read_mutex);
1017 if (dev->state == STATE_OFFLINE) {
1019 mutex_unlock(&dev->read_mutex);
1022 /* Check if we aligned the size due to MTU constraint */
1023 if (count < read_req->length)
1024 read_req->actual = (read_req->actual > count ?
1025 count : read_req->actual);
1026 /* if xfer_file_length is 0xFFFFFFFF, then we read until
1027 * we get a zero length packet
1029 if (count != 0xFFFFFFFF)
1030 count -= read_req->actual;
1031 if (read_req->actual < read_req->length) {
1033 * short packet is used to signal EOF for
1036 DBG(cdev, "got short packet\n");
/* hand the filled request over to the write side of the loop */
1040 write_req = read_req;
1042 mutex_unlock(&dev->read_mutex);
1046 DBG(cdev, "receive_file_work returning %d\n", r);
1047 /* write the result */
1048 dev->xfer_result = r;
/* Queue an MTP event (at most INTR_BUFFER_SIZE bytes, copied from
 * userspace) on the interrupt-in endpoint, waiting up to one second
 * for an idle interrupt request to become available.
 */
1052 static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
1054 struct usb_request *req = NULL;
1056 int length = event->length;
1058 DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
1060 if (length < 0 || length > INTR_BUFFER_SIZE)
1062 if (dev->state == STATE_OFFLINE)
1065 ret = wait_event_interruptible_timeout(dev->intr_wq,
1066 (req = mtp_req_get(dev, &dev->intr_idle)),
1067 msecs_to_jiffies(1000));
1071 if (copy_from_user(req->buf, (void __user *)event->data, length)) {
1072 mtp_req_put(dev, &dev->intr_idle, req);
1075 req->length = length;
1076 ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
/* on queue failure, return the request to the idle pool */
1078 mtp_req_put(dev, &dev->intr_idle, req);
/* Common implementation for the file-transfer ioctls: takes the
 * single-ioctl lock, pins the userspace fd with fget(), hands the
 * transfer parameters to the appropriate work item, and blocks via
 * flush_workqueue() until the transfer finishes, returning
 * dev->xfer_result.
 */
1083 static long mtp_send_receive_ioctl(struct file *fp, unsigned code,
1084 struct mtp_file_range *mfr)
1086 struct mtp_dev *dev = fp->private_data;
1087 struct file *filp = NULL;
1088 struct work_struct *work;
1091 if (mtp_lock(&dev->ioctl_excl)) {
1092 DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
1096 spin_lock_irq(&dev->lock);
1097 if (dev->state == STATE_CANCELED) {
1098 /* report cancellation to userspace */
1099 dev->state = STATE_READY;
1100 spin_unlock_irq(&dev->lock);
1104 if (dev->state == STATE_OFFLINE) {
1105 spin_unlock_irq(&dev->lock);
1109 dev->state = STATE_BUSY;
1110 spin_unlock_irq(&dev->lock);
1112 /* hold a reference to the file while we are working with it */
1113 filp = fget(mfr->fd);
1119 /* write the parameters */
1120 dev->xfer_file = filp;
1121 dev->xfer_file_offset = mfr->offset;
1122 dev->xfer_file_length = mfr->length;
1123 /* make sure write is done before parameters are read */
1126 if (code == MTP_SEND_FILE_WITH_HEADER) {
1127 work = &dev->send_file_work;
1128 dev->xfer_send_header = 1;
1129 dev->xfer_command = mfr->command;
1130 dev->xfer_transaction_id = mfr->transaction_id;
1131 } else if (code == MTP_SEND_FILE) {
1132 work = &dev->send_file_work;
1133 dev->xfer_send_header = 0;
1135 work = &dev->receive_file_work;
1138 /* We do the file transfer on a work queue so it will run
1139 * in kernel context, which is necessary for vfs_read and
1140 * vfs_write to use our buffers in the kernel address space.
1142 queue_work(dev->wq, work);
1143 /* wait for operation to complete */
1144 flush_workqueue(dev->wq);
1147 /* read the result */
1149 ret = dev->xfer_result;
1152 spin_lock_irq(&dev->lock);
1153 if (dev->state == STATE_CANCELED)
1155 else if (dev->state != STATE_OFFLINE)
1156 dev->state = STATE_READY;
1157 spin_unlock_irq(&dev->lock);
1159 mtp_unlock(&dev->ioctl_excl);
1160 DBG(dev->cdev, "ioctl returning %d state:%d\n", ret, dev->state);
/* ioctl dispatcher: file-transfer codes go to mtp_send_receive_ioctl();
 * MTP_SEND_EVENT takes only the ioctl lock and sends the event without
 * touching dev->state.
 */
1164 static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
1166 struct mtp_dev *dev = fp->private_data;
1167 struct mtp_file_range mfr;
1168 struct mtp_event event;
1173 case MTP_RECEIVE_FILE:
1174 case MTP_SEND_FILE_WITH_HEADER:
1175 if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
1179 ret = mtp_send_receive_ioctl(fp, code, &mfr);
1181 case MTP_SEND_EVENT:
1182 if (mtp_lock(&dev->ioctl_excl))
1184 /* return here so we don't change dev->state below,
1185 * which would interfere with bulk transfer state.
1187 if (copy_from_user(&event, (void __user *)value, sizeof(event)))
1190 ret = mtp_send_event(dev, &event);
1191 mtp_unlock(&dev->ioctl_excl);
1194 DBG(dev->cdev, "unknown ioctl code: %d\n", code);
1201 * 32 bit userspace calling into 64 bit kernel. handle ioctl code
1202 * and userspace pointer
1204 #ifdef CONFIG_COMPAT
/* 32-bit compat ioctl: translates compat ioctl codes and the compat
 * mtp_file_range/mtp_event layouts into their native forms, then
 * delegates to the same helpers as mtp_ioctl().
 */
1205 static long compat_mtp_ioctl(struct file *fp, unsigned code,
1206 unsigned long value)
1208 struct mtp_dev *dev = fp->private_data;
1209 struct mtp_file_range mfr;
1210 struct __compat_mtp_file_range cmfr;
1211 struct mtp_event event;
1212 struct __compat_mtp_event cevent;
1214 bool send_file = false;
1218 case COMPAT_MTP_SEND_FILE:
1219 cmd = MTP_SEND_FILE;
1222 case COMPAT_MTP_RECEIVE_FILE:
1223 cmd = MTP_RECEIVE_FILE;
1226 case COMPAT_MTP_SEND_FILE_WITH_HEADER:
1227 cmd = MTP_SEND_FILE_WITH_HEADER;
1230 case COMPAT_MTP_SEND_EVENT:
1231 cmd = MTP_SEND_EVENT;
1234 DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
1240 if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
1245 mfr.offset = cmfr.offset;
1246 mfr.length = cmfr.length;
1247 mfr.command = cmfr.command;
1248 mfr.transaction_id = cmfr.transaction_id;
1249 ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
1251 if (mtp_lock(&dev->ioctl_excl))
1253 /* return here so we don't change dev->state below,
1254 * which would interfere with bulk transfer state.
1256 if (copy_from_user(&cevent, (void __user *)value,
/* widen the compat 32-bit data pointer to a native pointer */
1261 event.length = cevent.length;
1262 event.data = compat_ptr(cevent.data);
1263 ret = mtp_send_event(dev, &event);
1264 mtp_unlock(&dev->ioctl_excl);
/* open() for /dev/mtp_usb: enforce a single opener via open_excl and
 * clear any stale error state from a previous session.
 */
1271 static int mtp_open(struct inode *ip, struct file *fp)
1273 printk(KERN_INFO "mtp_open\n");
1274 if (mtp_lock(&_mtp_dev->open_excl)) {
1275 pr_err("%s mtp_release not called returning EBUSY\n", __func__);
1279 /* clear any error condition */
1280 if (_mtp_dev->state != STATE_OFFLINE)
1281 _mtp_dev->state = STATE_READY;
1283 fp->private_data = _mtp_dev;
/* release() drops the single-opener lock taken in mtp_open() */
1287 static int mtp_release(struct inode *ip, struct file *fp)
1289 printk(KERN_INFO "mtp_release\n");
1291 mtp_unlock(&_mtp_dev->open_excl);
1295 /* file operations for /dev/mtp_usb */
/* NOTE(review): .read/.write/.open entries are missing from this
 * extraction (embedded numbering skips).
 */
1296 static const struct file_operations mtp_fops = {
1297 .owner = THIS_MODULE,
1300 .unlocked_ioctl = mtp_ioctl,
1301 #ifdef CONFIG_COMPAT
1302 .compat_ioctl = compat_mtp_ioctl,
1305 .release = mtp_release,
/* misc char device registration for the "mtp_usb" node */
1308 static struct miscdevice mtp_device = {
1309 .minor = MISC_DYNAMIC_MINOR,
1310 .name = mtp_shortname,
/* ep0 control-request handler: serves the Microsoft OS string
 * descriptor (index 0xEE), the MS extended configuration (compat ID)
 * descriptor for MTP/PTP, and the MTP class requests CANCEL and
 * GET_DEVICE_STATUS; queues the data/status phase on ep0 when a
 * request is handled.
 */
1314 static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
1315 const struct usb_ctrlrequest *ctrl)
1317 struct mtp_dev *dev = _mtp_dev;
1318 int value = -EOPNOTSUPP;
1319 u16 w_index = le16_to_cpu(ctrl->wIndex);
1320 u16 w_value = le16_to_cpu(ctrl->wValue);
1321 u16 w_length = le16_to_cpu(ctrl->wLength);
1322 unsigned long flags;
1324 VDBG(cdev, "mtp_ctrlrequest "
1325 "%02x.%02x v%04x i%04x l%u\n",
1326 ctrl->bRequestType, ctrl->bRequest,
1327 w_value, w_index, w_length);
1329 /* Handle MTP OS string */
1330 if (ctrl->bRequestType ==
1331 (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
1332 && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
1333 && (w_value >> 8) == USB_DT_STRING
1334 && (w_value & 0xFF) == MTP_OS_STRING_ID) {
1335 value = (w_length < sizeof(mtp_os_string)
1336 ? w_length : sizeof(mtp_os_string));
1337 memcpy(cdev->req->buf, mtp_os_string, value);
1338 } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
1339 /* Handle MTP OS descriptor */
1340 DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
1341 ctrl->bRequest, w_index, w_value, w_length);
1343 if (ctrl->bRequest == 1
1344 && (ctrl->bRequestType & USB_DIR_IN)
1345 && (w_index == 4 || w_index == 5)) {
/* clamp the reply to the smaller of wLength and the descriptor;
 * presumably MTP vs PTP is selected here (branch lines missing
 * from this extraction) */
1348 sizeof(mtp_ext_config_desc) ?
1350 sizeof(mtp_ext_config_desc));
1351 memcpy(cdev->req->buf, &mtp_ext_config_desc,
1355 sizeof(ptp_ext_config_desc) ?
1357 sizeof(ptp_ext_config_desc));
1358 memcpy(cdev->req->buf, &ptp_ext_config_desc,
1362 } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
1363 DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
1364 ctrl->bRequest, w_index, w_value, w_length);
1366 if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
1368 DBG(cdev, "MTP_REQ_CANCEL\n");
1370 spin_lock_irqsave(&dev->lock, flags);
1371 if (dev->state == STATE_BUSY) {
1372 dev->state = STATE_CANCELED;
1373 wake_up(&dev->read_wq);
1374 wake_up(&dev->write_wq);
1376 spin_unlock_irqrestore(&dev->lock, flags);
1378 /* We need to queue a request to read the remaining
1379 * bytes, but we don't actually need to look at
1383 } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
1384 && w_index == 0 && w_value == 0) {
1385 struct mtp_device_status *status = cdev->req->buf;
1388 __constant_cpu_to_le16(sizeof(*status));
1390 DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
1391 spin_lock_irqsave(&dev->lock, flags);
1392 /* device status is "busy" until we report
1393 * the cancellation to userspace
1395 if (dev->state == STATE_CANCELED)
1397 __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
1400 __cpu_to_le16(MTP_RESPONSE_OK);
1401 spin_unlock_irqrestore(&dev->lock, flags);
1402 value = sizeof(*status);
1406 /* respond with data transfer or status phase? */
1410 cdev->req->zero = value < w_length;
1411 cdev->req->length = value;
1412 rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
1414 ERROR(cdev, "%s: response queue error\n", __func__);
1420 mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
1422 struct usb_composite_dev *cdev = c->cdev;
1423 struct mtp_dev *dev = func_to_mtp(f);
1426 struct mtp_instance *fi_mtp;
1429 DBG(cdev, "mtp_function_bind dev: %pK\n", dev);
1431 /* allocate interface ID(s) */
1432 id = usb_interface_id(c, f);
1435 mtp_interface_desc.bInterfaceNumber = id;
1437 if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
1438 ret = usb_string_id(c->cdev);
1441 mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
1442 mtp_interface_desc.iInterface = ret;
1445 fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
1447 if (cdev->use_os_string) {
1448 f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
1450 if (!f->os_desc_table)
1453 f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
1456 /* allocate endpoints */
1457 ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
1458 &mtp_fullspeed_out_desc, &mtp_intr_desc);
1462 /* support high speed hardware */
1463 if (gadget_is_dualspeed(c->cdev->gadget)) {
1464 mtp_highspeed_in_desc.bEndpointAddress =
1465 mtp_fullspeed_in_desc.bEndpointAddress;
1466 mtp_highspeed_out_desc.bEndpointAddress =
1467 mtp_fullspeed_out_desc.bEndpointAddress;
1469 /* support super speed hardware */
1470 if (gadget_is_superspeed(c->cdev->gadget)) {
1473 /* Calculate bMaxBurst, we know packet size is 1024 */
1474 max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
1475 mtp_ss_in_desc.bEndpointAddress =
1476 mtp_fullspeed_in_desc.bEndpointAddress;
1477 mtp_ss_in_comp_desc.bMaxBurst = max_burst;
1478 mtp_ss_out_desc.bEndpointAddress =
1479 mtp_fullspeed_out_desc.bEndpointAddress;
1480 mtp_ss_out_comp_desc.bMaxBurst = max_burst;
1483 /* support super speed hardware */
1484 if (gadget_is_superspeed(c->cdev->gadget)) {
1485 mtp_ss_in_desc.bEndpointAddress =
1486 mtp_fullspeed_in_desc.bEndpointAddress;
1487 mtp_ss_out_desc.bEndpointAddress =
1488 mtp_fullspeed_out_desc.bEndpointAddress;
1491 fi_mtp->func_inst.f = &dev->function;
1492 DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
1493 gadget_is_superspeed(c->cdev->gadget) ? "super" :
1494 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
1495 f->name, dev->ep_in->name, dev->ep_out->name);
1500 mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
1502 struct mtp_dev *dev = func_to_mtp(f);
1503 struct mtp_instance *fi_mtp;
1504 struct usb_request *req;
1506 fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
1507 mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
1508 mutex_lock(&dev->read_mutex);
1509 while ((req = mtp_req_get(dev, &dev->tx_idle)))
1510 mtp_request_free(req, dev->ep_in);
1511 for (i = 0; i < RX_REQ_MAX; i++)
1512 mtp_request_free(dev->rx_req[i], dev->ep_out);
1513 while ((req = mtp_req_get(dev, &dev->intr_idle)))
1514 mtp_request_free(req, dev->ep_intr);
1515 mutex_unlock(&dev->read_mutex);
1516 dev->state = STATE_OFFLINE;
1517 dev->is_ptp = false;
1518 kfree(f->os_desc_table);
1520 fi_mtp->func_inst.f = NULL;
1523 static int mtp_function_set_alt(struct usb_function *f,
1524 unsigned intf, unsigned alt)
1526 struct mtp_dev *dev = func_to_mtp(f);
1527 struct usb_composite_dev *cdev = f->config->cdev;
1530 DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1532 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
1536 ret = usb_ep_enable(dev->ep_in);
1540 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
1544 ret = usb_ep_enable(dev->ep_out);
1546 usb_ep_disable(dev->ep_in);
1550 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
1554 ret = usb_ep_enable(dev->ep_intr);
1556 usb_ep_disable(dev->ep_out);
1557 usb_ep_disable(dev->ep_in);
1560 dev->state = STATE_READY;
1562 /* readers may be blocked waiting for us to go online */
1563 wake_up(&dev->read_wq);
1567 static void mtp_function_disable(struct usb_function *f)
1569 struct mtp_dev *dev = func_to_mtp(f);
1570 struct usb_composite_dev *cdev = dev->cdev;
1572 DBG(cdev, "mtp_function_disable\n");
1573 dev->state = STATE_OFFLINE;
1574 usb_ep_disable(dev->ep_in);
1575 usb_ep_disable(dev->ep_out);
1576 usb_ep_disable(dev->ep_intr);
1578 /* readers may be blocked waiting for us to go online */
1579 wake_up(&dev->read_wq);
1581 VDBG(cdev, "%s disabled\n", dev->function.name);
1584 static int debug_mtp_read_stats(struct seq_file *s, void *unused)
1586 struct mtp_dev *dev = _mtp_dev;
1588 unsigned long flags;
1589 unsigned min, max = 0, sum = 0, iteration = 0;
1591 seq_puts(s, "\n=======================\n");
1592 seq_puts(s, "MTP Write Stats:\n");
1593 seq_puts(s, "\n=======================\n");
1594 spin_lock_irqsave(&dev->lock, flags);
1595 min = dev->perf[0].vfs_wtime;
1596 for (i = 0; i < MAX_ITERATION; i++) {
1597 seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
1598 dev->perf[i].vfs_wbytes,
1599 dev->perf[i].vfs_wtime);
1600 if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
1601 sum += dev->perf[i].vfs_wtime;
1602 if (min > dev->perf[i].vfs_wtime)
1603 min = dev->perf[i].vfs_wtime;
1604 if (max < dev->perf[i].vfs_wtime)
1605 max = dev->perf[i].vfs_wtime;
1610 seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
1611 min, max, sum / iteration);
1612 min = max = sum = iteration = 0;
1613 seq_puts(s, "\n=======================\n");
1614 seq_puts(s, "MTP Read Stats:\n");
1615 seq_puts(s, "\n=======================\n");
1617 min = dev->perf[0].vfs_rtime;
1618 for (i = 0; i < MAX_ITERATION; i++) {
1619 seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
1620 dev->perf[i].vfs_rbytes,
1621 dev->perf[i].vfs_rtime);
1622 if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
1623 sum += dev->perf[i].vfs_rtime;
1624 if (min > dev->perf[i].vfs_rtime)
1625 min = dev->perf[i].vfs_rtime;
1626 if (max < dev->perf[i].vfs_rtime)
1627 max = dev->perf[i].vfs_rtime;
1632 seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
1633 min, max, sum / iteration);
1634 spin_unlock_irqrestore(&dev->lock, flags);
1638 static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
1639 size_t count, loff_t *ppos)
1642 unsigned long flags;
1643 struct mtp_dev *dev = _mtp_dev;
1646 pr_err("[%s] EINVAL\n", __func__);
1650 if (kstrtoint(buf, 0, &clear_stats) || clear_stats != 0) {
1651 pr_err("Wrong value. To clear stats, enter value as 0.\n");
1655 spin_lock_irqsave(&dev->lock, flags);
1656 memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
1657 dev->dbg_read_index = 0;
1658 dev->dbg_write_index = 0;
1659 spin_unlock_irqrestore(&dev->lock, flags);
1664 static int debug_mtp_open(struct inode *inode, struct file *file)
1666 return single_open(file, debug_mtp_read_stats, inode->i_private);
1669 static const struct file_operations debug_mtp_ops = {
1670 .open = debug_mtp_open,
1672 .write = debug_mtp_reset_stats,
1675 struct dentry *dent_mtp;
1676 static void mtp_debugfs_init(void)
1678 struct dentry *dent_mtp_status;
1680 dent_mtp = debugfs_create_dir("usb_mtp", 0);
1681 if (!dent_mtp || IS_ERR(dent_mtp))
1684 dent_mtp_status = debugfs_create_file("status", S_IRUGO | S_IWUSR,
1685 dent_mtp, 0, &debug_mtp_ops);
1686 if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
1687 debugfs_remove(dent_mtp);
1693 static void mtp_debugfs_remove(void)
1695 debugfs_remove_recursive(dent_mtp);
1698 static int __mtp_setup(struct mtp_instance *fi_mtp)
1700 struct mtp_dev *dev;
1703 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1711 spin_lock_init(&dev->lock);
1712 init_waitqueue_head(&dev->read_wq);
1713 init_waitqueue_head(&dev->write_wq);
1714 init_waitqueue_head(&dev->intr_wq);
1715 atomic_set(&dev->open_excl, 0);
1716 atomic_set(&dev->ioctl_excl, 0);
1717 INIT_LIST_HEAD(&dev->tx_idle);
1718 INIT_LIST_HEAD(&dev->intr_idle);
1720 dev->wq = create_singlethread_workqueue("f_mtp");
1725 INIT_WORK(&dev->send_file_work, send_file_work);
1726 INIT_WORK(&dev->receive_file_work, receive_file_work);
1730 ret = misc_register(&mtp_device);
1738 destroy_workqueue(dev->wq);
1742 printk(KERN_ERR "mtp gadget driver failed to initialize\n");
/* configfs entry point for device setup; thin wrapper over __mtp_setup(). */
static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
{
	return __mtp_setup(fi_mtp);
}
1752 static void mtp_cleanup(void)
1754 struct mtp_dev *dev = _mtp_dev;
1759 mtp_debugfs_remove();
1760 misc_deregister(&mtp_device);
1761 destroy_workqueue(dev->wq);
1766 static struct mtp_instance *to_mtp_instance(struct config_item *item)
1768 return container_of(to_config_group(item), struct mtp_instance,
1772 static void mtp_attr_release(struct config_item *item)
1774 struct mtp_instance *fi_mtp = to_mtp_instance(item);
1776 usb_put_function_instance(&fi_mtp->func_inst);
1779 static struct configfs_item_operations mtp_item_ops = {
1780 .release = mtp_attr_release,
1783 static struct config_item_type mtp_func_type = {
1784 .ct_item_ops = &mtp_item_ops,
1785 .ct_owner = THIS_MODULE,
1789 static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
1791 return container_of(fi, struct mtp_instance, func_inst);
1794 static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
1796 struct mtp_instance *fi_mtp;
1800 name_len = strlen(name) + 1;
1801 if (name_len > MAX_INST_NAME_LEN)
1802 return -ENAMETOOLONG;
1804 ptr = kstrndup(name, name_len, GFP_KERNEL);
1808 fi_mtp = to_fi_mtp(fi);
1814 static void mtp_free_inst(struct usb_function_instance *fi)
1816 struct mtp_instance *fi_mtp;
1818 fi_mtp = to_fi_mtp(fi);
1819 kfree(fi_mtp->name);
1821 kfree(fi_mtp->mtp_os_desc.group.default_groups);
1825 struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
1827 struct mtp_instance *fi_mtp;
1829 struct usb_os_desc *descs[1];
1832 fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
1834 return ERR_PTR(-ENOMEM);
1835 fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
1836 fi_mtp->func_inst.free_func_inst = mtp_free_inst;
1838 fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
1839 INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
1840 descs[0] = &fi_mtp->mtp_os_desc;
1842 usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
1843 descs, names, THIS_MODULE);
1846 ret = mtp_setup_configfs(fi_mtp);
1849 pr_err("Error setting MTP\n");
1850 return ERR_PTR(ret);
1853 fi_mtp->dev = _mtp_dev;
1855 config_group_init_type_name(&fi_mtp->func_inst.group,
1856 "", &mtp_func_type);
1858 mutex_init(&fi_mtp->dev->read_mutex);
1860 return &fi_mtp->func_inst;
1862 EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
1864 static struct usb_function_instance *mtp_alloc_inst(void)
1866 return alloc_inst_mtp_ptp(true);
1869 static int mtp_ctrlreq_configfs(struct usb_function *f,
1870 const struct usb_ctrlrequest *ctrl)
1872 return mtp_ctrlrequest(f->config->cdev, ctrl);
static void mtp_free(struct usb_function *f)
{
	/* NO-OP: no function specific resource allocation in mtp_alloc */
}
1880 struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
1883 struct mtp_instance *fi_mtp = to_fi_mtp(fi);
1884 struct mtp_dev *dev;
1887 * PTP piggybacks on MTP function so make sure we have
1888 * created MTP function before we associate this PTP
1889 * function with a gadget configuration.
1891 if (fi_mtp->dev == NULL) {
1892 pr_err("Error: Create MTP function before linking"
1893 " PTP function with a gadget configuration\n");
1894 pr_err("\t1: Delete existing PTP function if any\n");
1895 pr_err("\t2: Create MTP function\n");
1896 pr_err("\t3: Create and symlink PTP function"
1897 " with a gadget configuration\n");
1898 return ERR_PTR(-EINVAL); /* Invalid Configuration */
1902 dev->function.name = DRIVER_NAME;
1903 dev->function.strings = mtp_strings;
1905 dev->function.fs_descriptors = fs_mtp_descs;
1906 dev->function.hs_descriptors = hs_mtp_descs;
1907 dev->function.ss_descriptors = ss_mtp_descs;
1909 dev->function.fs_descriptors = fs_ptp_descs;
1910 dev->function.hs_descriptors = hs_ptp_descs;
1911 dev->function.ss_descriptors = ss_ptp_descs;
1913 dev->function.bind = mtp_function_bind;
1914 dev->function.unbind = mtp_function_unbind;
1915 dev->function.set_alt = mtp_function_set_alt;
1916 dev->function.disable = mtp_function_disable;
1917 dev->function.setup = mtp_ctrlreq_configfs;
1918 dev->function.free_func = mtp_free;
1919 dev->is_ptp = !mtp_config;
1921 return &dev->function;
1923 EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
1925 static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
1927 return function_alloc_mtp_ptp(fi, true);
1930 DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
1931 MODULE_LICENSE("GPL");