OSDN Git Service

Merge "Merge branch 'android-4.4@9796ea8' into branch 'msm-4.4'"
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / usb / gadget / function / f_mtp.c
1 /*
2  * Gadget Function Driver for MTP
3  *
4  * Copyright (C) 2010 Google, Inc.
5  * Author: Mike Lockwood <lockwood@android.com>
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 /* #define DEBUG */
19 /* #define VERBOSE_DEBUG */
20
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/poll.h>
24 #include <linux/delay.h>
25 #include <linux/wait.h>
26 #include <linux/err.h>
27 #include <linux/interrupt.h>
28
29 #include <linux/seq_file.h>
30 #include <linux/debugfs.h>
31 #include <linux/types.h>
32 #include <linux/file.h>
33 #include <linux/device.h>
34 #include <linux/miscdevice.h>
35
36 #include <linux/usb.h>
37 #include <linux/usb_usual.h>
38 #include <linux/usb/ch9.h>
39 #include <linux/usb/f_mtp.h>
40 #include <linux/configfs.h>
41 #include <linux/usb/composite.h>
42
43 #include "configfs.h"
44
/* Initial (preferred) transfer buffer sizes; rx/tx are module-param tunable */
#define MTP_RX_BUFFER_INIT_SIZE    1048576
#define MTP_TX_BUFFER_INIT_SIZE    1048576
/* Fallback buffer size used when the large initial allocations fail */
#define MTP_BULK_BUFFER_SIZE       16384
#define INTR_BUFFER_SIZE           28
#define MAX_INST_NAME_LEN          40
/* Largest value representable in the 32-bit MTP data-header length field */
#define MTP_MAX_FILE_SIZE          0xFFFFFFFFL

/* String IDs */
#define INTERFACE_STRING_INDEX  0

/* values for mtp_dev.state */
#define STATE_OFFLINE               0   /* initial state, disconnected */
#define STATE_READY                 1   /* ready for userspace calls */
#define STATE_BUSY                  2   /* processing userspace calls */
#define STATE_CANCELED              3   /* transaction canceled by host */
#define STATE_ERROR                 4   /* error from completion routine */

/* number of tx and rx requests to allocate */
#define MTP_TX_REQ_MAX 8
#define RX_REQ_MAX 2
#define INTR_REQ_MAX 5

/* ID for Microsoft MTP OS String */
#define MTP_OS_STRING_ID   0xEE

/* MTP class requests */
#define MTP_REQ_CANCEL              0x64
#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
#define MTP_REQ_RESET               0x66
#define MTP_REQ_GET_DEVICE_STATUS   0x67

/* constants for device status */
#define MTP_RESPONSE_OK             0x2001
#define MTP_RESPONSE_DEVICE_BUSY    0x2019
#define DRIVER_NAME "mtp"

/* depth of the perf[] sample ring buffer below */
#define MAX_ITERATION           100

/* rx/tx buffer sizes and tx request count, adjustable at module load
 * or via /sys/module; clamped back to safe defaults on allocation failure
 * in mtp_create_bulk_endpoints()
 */
unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);

unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
module_param(mtp_tx_req_len, uint, S_IRUGO | S_IWUSR);

unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
module_param(mtp_tx_reqs, uint, S_IRUGO | S_IWUSR);

/* misc char device name: "mtp_usb" */
static const char mtp_shortname[] = DRIVER_NAME "_usb";
93
/*
 * Per-function runtime state, shared between the USB function callbacks
 * (completion handlers, bind/disable) and the character-device interface.
 */
struct mtp_dev {
        struct usb_function function;
        struct usb_composite_dev *cdev;
        spinlock_t lock;        /* guards state and the idle request lists */

        struct usb_ep *ep_in;   /* bulk IN: device -> host */
        struct usb_ep *ep_out;  /* bulk OUT: host -> device */
        struct usb_ep *ep_intr; /* interrupt IN: MTP events to host */

        int state;              /* one of the STATE_* values above */

        /* synchronize access to our device file */
        atomic_t open_excl;
        /* to enforce only one ioctl at a time */
        atomic_t ioctl_excl;

        struct list_head tx_idle;       /* unused bulk-IN requests */
        struct list_head intr_idle;     /* unused interrupt requests */

        wait_queue_head_t read_wq;      /* woken by mtp_complete_out */
        wait_queue_head_t write_wq;     /* woken by mtp_complete_in */
        wait_queue_head_t intr_wq;      /* woken by mtp_complete_intr */
        struct usb_request *rx_req[RX_REQ_MAX];
        int rx_done;            /* set by mtp_complete_out when a rx request finishes */

        /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
         * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
         */
        struct workqueue_struct *wq;
        struct work_struct send_file_work;
        struct work_struct receive_file_work;
        struct file *xfer_file;
        loff_t xfer_file_offset;
        int64_t xfer_file_length;
        unsigned xfer_send_header;      /* nonzero: prepend struct mtp_data_header */
        uint16_t xfer_command;
        uint32_t xfer_transaction_id;
        int xfer_result;
        /* ring buffer of VFS read/write byte counts and latencies
         * (presumably exported via debugfs — <linux/debugfs.h> is included;
         * confirm against the debugfs code later in the file)
         */
        struct {
                unsigned long vfs_rbytes;
                unsigned long vfs_wbytes;
                unsigned vfs_rtime;
                unsigned vfs_wtime;
        } perf[MAX_ITERATION];
        unsigned dbg_read_index;
        unsigned dbg_write_index;
        bool is_ptp;            /* true when bound as PTP rather than MTP */
        struct mutex  read_mutex;       /* serializes rx_req use in mtp_read() */
};
143
/* Vendor-specific interface used when enumerating as MTP; libmtp and
 * Windows match on the MS OS descriptors / "MTP" string instead of class.
 */
static struct usb_interface_descriptor mtp_interface_desc = {
        .bLength                = USB_DT_INTERFACE_SIZE,
        .bDescriptorType        = USB_DT_INTERFACE,
        .bInterfaceNumber       = 0,
        .bNumEndpoints          = 3,
        .bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
        .bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
        .bInterfaceProtocol     = 0,
};

/* Still-image-class (PIMA 15740 / PTP) interface used when is_ptp is set */
static struct usb_interface_descriptor ptp_interface_desc = {
        .bLength                = USB_DT_INTERFACE_SIZE,
        .bDescriptorType        = USB_DT_INTERFACE,
        .bInterfaceNumber       = 0,
        .bNumEndpoints          = 3,
        .bInterfaceClass        = USB_CLASS_STILL_IMAGE,
        .bInterfaceSubClass     = 1,
        .bInterfaceProtocol     = 1,
};
163
/* Endpoint templates; usb_ep_autoconfig() fills in the actual endpoint
 * addresses at bind time. Super-speed bulk endpoints use 1024-byte
 * max packets, high-speed 512, full-speed left to the default.
 */
static struct usb_endpoint_descriptor mtp_ss_in_desc = {
        .bLength                = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType        = USB_DT_ENDPOINT,
        .bEndpointAddress       = USB_DIR_IN,
        .bmAttributes           = USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize         = cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
        .bLength =              sizeof(mtp_ss_in_comp_desc),
        .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,

        /* the following 2 values can be tweaked if necessary */
        .bMaxBurst =            2,
        /* .bmAttributes =      0, */
};

static struct usb_endpoint_descriptor mtp_ss_out_desc = {
        .bLength                = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType        = USB_DT_ENDPOINT,
        .bEndpointAddress       = USB_DIR_OUT,
        .bmAttributes           = USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize         = cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
        .bLength =              sizeof(mtp_ss_out_comp_desc),
        .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,

        /* the following 2 values can be tweaked if necessary */
        .bMaxBurst =            2,
        /* .bmAttributes =      0, */
};

static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
        .bLength                = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType        = USB_DT_ENDPOINT,
        .bEndpointAddress       = USB_DIR_IN,
        .bmAttributes           = USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
        .bLength                = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType        = USB_DT_ENDPOINT,
        .bEndpointAddress       = USB_DIR_OUT,
        .bmAttributes           = USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
        .bLength                = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType        = USB_DT_ENDPOINT,
        .bEndpointAddress       = USB_DIR_IN,
        .bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
        .bLength                = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType        = USB_DT_ENDPOINT,
        .bEndpointAddress       = USB_DIR_OUT,
        .bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

/* Interrupt endpoint for MTP events; shared by all speeds */
static struct usb_endpoint_descriptor mtp_intr_desc = {
        .bLength                = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType        = USB_DT_ENDPOINT,
        .bEndpointAddress       = USB_DIR_IN,
        .bmAttributes           = USB_ENDPOINT_XFER_INT,
        .wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
        .bInterval              = 6,
};

static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
        .bLength =              sizeof(mtp_intr_ss_comp_desc),
        .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,

        /* the following 3 values can be tweaked if necessary */
        /* .bMaxBurst =         0, */
        /* .bmAttributes =      0, */
        .wBytesPerInterval =    cpu_to_le16(INTR_BUFFER_SIZE),
};
247
/* NULL-terminated descriptor lists handed to the composite framework,
 * one set per speed (full/high/super) for each of MTP and PTP.
 */
static struct usb_descriptor_header *fs_mtp_descs[] = {
        (struct usb_descriptor_header *) &mtp_interface_desc,
        (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        NULL,
};

static struct usb_descriptor_header *hs_mtp_descs[] = {
        (struct usb_descriptor_header *) &mtp_interface_desc,
        (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        NULL,
};

static struct usb_descriptor_header *ss_mtp_descs[] = {
        (struct usb_descriptor_header *) &mtp_interface_desc,
        (struct usb_descriptor_header *) &mtp_ss_in_desc,
        (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
        (struct usb_descriptor_header *) &mtp_ss_out_desc,
        (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
        NULL,
};

static struct usb_descriptor_header *fs_ptp_descs[] = {
        (struct usb_descriptor_header *) &ptp_interface_desc,
        (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        NULL,
};

static struct usb_descriptor_header *hs_ptp_descs[] = {
        (struct usb_descriptor_header *) &ptp_interface_desc,
        (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        NULL,
};

static struct usb_descriptor_header *ss_ptp_descs[] = {
        (struct usb_descriptor_header *) &ptp_interface_desc,
        (struct usb_descriptor_header *) &mtp_ss_in_desc,
        (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
        (struct usb_descriptor_header *) &mtp_ss_out_desc,
        (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
        NULL,
};
301
static struct usb_string mtp_string_defs[] = {
        /* Naming interface "MTP" so libmtp will recognize us */
        [INTERFACE_STRING_INDEX].s      = "MTP",
        {  },   /* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
        .language               = 0x0409,       /* en-US */
        .strings                = mtp_string_defs,
};

static struct usb_gadget_strings *mtp_strings[] = {
        &mtp_string_table,
        NULL,
};

/* Microsoft MTP OS String
 * Returned for string descriptor index 0xEE; the vendor code byte (1)
 * is what the host echoes back in its vendor-specific request to fetch
 * the extended configuration descriptor below.
 */
static u8 mtp_os_string[] = {
        18, /* sizeof(mtp_os_string) */
        USB_DT_STRING,
        /* Signature field: "MSFT100" */
        'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
        /* vendor code */
        1,
        /* padding */
        0
};
329
/* Microsoft Extended Configuration Descriptor Header Section */
struct mtp_ext_config_desc_header {
        __le32  dwLength;
        __u16   bcdVersion;     /* NOTE(review): spec field is little-endian; __le16 would be the precise type */
        __le16  wIndex;
        __u8    bCount;         /* number of function sections that follow */
        __u8    reserved[7];
};

/* Microsoft Extended Configuration Descriptor Function Section */
struct mtp_ext_config_desc_function {
        __u8    bFirstInterfaceNumber;
        __u8    bInterfaceCount;
        __u8    compatibleID[8];        /* e.g. "MTP" / "PTP", NUL-padded */
        __u8    subCompatibleID[8];
        __u8    reserved[6];
};

/* MTP Extended Configuration Descriptor: header plus one function section */
struct ext_mtp_desc {
        struct mtp_ext_config_desc_header       header;
        struct mtp_ext_config_desc_function    function;
};
353
354 struct ext_mtp_desc  mtp_ext_config_desc = {
355         .header = {
356                 .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
357                 .bcdVersion = __constant_cpu_to_le16(0x0100),
358                 .wIndex = __constant_cpu_to_le16(4),
359                 .bCount = 1,
360         },
361         .function = {
362                 .bFirstInterfaceNumber = 0,
363                 .bInterfaceCount = 1,
364                 .compatibleID = { 'M', 'T', 'P' },
365         },
366 };
367
368 struct ext_mtp_desc ptp_ext_config_desc = {
369         .header = {
370                 .dwLength = cpu_to_le32(sizeof(mtp_ext_config_desc)),
371                 .bcdVersion = cpu_to_le16(0x0100),
372                 .wIndex = cpu_to_le16(4),
373                 .bCount = cpu_to_le16(1),
374         },
375         .function = {
376                 .bFirstInterfaceNumber = 0,
377                 .bInterfaceCount = 1,
378                 .compatibleID = { 'P', 'T', 'P' },
379         },
380 };
381
/* Payload returned for the MTP_REQ_GET_DEVICE_STATUS class request */
struct mtp_device_status {
        __le16  wLength;
        __le16  wCode;  /* MTP_RESPONSE_OK or MTP_RESPONSE_DEVICE_BUSY */
};

/* Wire header prepended to data packets (MTP Specification v1.0) */
struct mtp_data_header {
        /* length of packet, including this header */
        __le32  length;
        /* container type (2 for data packet) */
        __le16  type;
        /* MTP command code */
        __le16  command;
        /* MTP transaction ID */
        __le32  transaction_id;
};

/* configfs instance wrapper: one per configured mtp.<name> function */
struct mtp_instance {
        struct usb_function_instance func_inst;
        const char *name;
        struct mtp_dev *dev;
        char mtp_ext_compat_id[16];
        struct usb_os_desc mtp_os_desc;
};
405
/* temporary variable used between mtp_open() and mtp_gadget_bind()
 * (single global — the driver supports one MTP device instance at a time)
 */
static struct mtp_dev *_mtp_dev;

/* Map a composite-framework usb_function back to its owning mtp_dev */
static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
        return container_of(f, struct mtp_dev, function);
}
413
414 static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
415 {
416         struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
417
418         if (!req)
419                 return NULL;
420
421         /* now allocate buffers for the requests */
422         req->buf = kmalloc(buffer_size, GFP_KERNEL);
423         if (!req->buf) {
424                 usb_ep_free_request(ep, req);
425                 return NULL;
426         }
427
428         return req;
429 }
430
431 static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
432 {
433         if (req) {
434                 kfree(req->buf);
435                 usb_ep_free_request(ep, req);
436         }
437 }
438
439 static inline int mtp_lock(atomic_t *excl)
440 {
441         if (atomic_inc_return(excl) == 1) {
442                 return 0;
443         } else {
444                 atomic_dec(excl);
445                 return -1;
446         }
447 }
448
/* Release a counter taken with mtp_lock() */
static inline void mtp_unlock(atomic_t *excl)
{
        atomic_dec(excl);
}
453
/* add a request to the tail of a list (dev->lock protects the list;
 * callable from interrupt context via the completion handlers)
 */
static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
                struct usb_request *req)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        list_add_tail(&req->list, head);
        spin_unlock_irqrestore(&dev->lock, flags);
}
464
465 /* remove a request from the head of a list */
466 static struct usb_request
467 *mtp_req_get(struct mtp_dev *dev, struct list_head *head)
468 {
469         unsigned long flags;
470         struct usb_request *req;
471
472         spin_lock_irqsave(&dev->lock, flags);
473         if (list_empty(head)) {
474                 req = 0;
475         } else {
476                 req = list_first_entry(head, struct usb_request, list);
477                 list_del(&req->list);
478         }
479         spin_unlock_irqrestore(&dev->lock, flags);
480         return req;
481 }
482
/* Bulk-IN completion: recycle the request onto tx_idle and wake writers.
 * Runs in interrupt context; flags errors via dev->state.
 */
static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
        struct mtp_dev *dev = _mtp_dev;

        if (req->status != 0 && dev->state != STATE_OFFLINE)
                dev->state = STATE_ERROR;

        mtp_req_put(dev, &dev->tx_idle, req);

        wake_up(&dev->write_wq);
}
494
/* Bulk-OUT completion: mark the rx request done and wake mtp_read()
 * (rx requests are not list-managed; readers poll dev->rx_done).
 */
static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
        struct mtp_dev *dev = _mtp_dev;

        dev->rx_done = 1;
        if (req->status != 0 && dev->state != STATE_OFFLINE)
                dev->state = STATE_ERROR;

        wake_up(&dev->read_wq);
}
505
/* Interrupt-IN completion: recycle onto intr_idle and wake event senders */
static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
        struct mtp_dev *dev = _mtp_dev;

        if (req->status != 0 && dev->state != STATE_OFFLINE)
                dev->state = STATE_ERROR;

        mtp_req_put(dev, &dev->intr_idle, req);

        wake_up(&dev->intr_wq);
}
517
/*
 * Autoconfigure the three endpoints and preallocate all usb_requests.
 * Called once at function bind time.
 *
 * Allocation fallback: if the large (module-param-sized) tx/rx buffers
 * cannot be allocated, the module params themselves are clamped down to
 * MTP_BULK_BUFFER_SIZE defaults and allocation is retried — so a failed
 * bind permanently lowers the tunables.
 *
 * Returns 0 on success; -ENODEV if an endpoint cannot be configured;
 * -1 (note: not a proper -ERRNO) if requests cannot be allocated even
 * at the fallback size.
 */
static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
                                struct usb_endpoint_descriptor *in_desc,
                                struct usb_endpoint_descriptor *out_desc,
                                struct usb_endpoint_descriptor *intr_desc)
{
        struct usb_composite_dev *cdev = dev->cdev;
        struct usb_request *req;
        struct usb_ep *ep;
        int i;

        DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);

        ep = usb_ep_autoconfig(cdev->gadget, in_desc);
        if (!ep) {
                DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
                return -ENODEV;
        }
        DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
        ep->driver_data = dev;          /* claim the endpoint */
        dev->ep_in = ep;

        ep = usb_ep_autoconfig(cdev->gadget, out_desc);
        if (!ep) {
                DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
                return -ENODEV;
        }
        DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
        ep->driver_data = dev;          /* claim the endpoint */
        dev->ep_out = ep;

        ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
        if (!ep) {
                DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
                return -ENODEV;
        }
        DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
        ep->driver_data = dev;          /* claim the endpoint */
        dev->ep_intr = ep;

retry_tx_alloc:
        /* now allocate requests for our endpoints */
        for (i = 0; i < mtp_tx_reqs; i++) {
                req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
                if (!req) {
                        /* already at the fallback size: give up */
                        if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
                                goto fail;
                        /* free what we got, shrink, and retry from scratch */
                        while ((req = mtp_req_get(dev, &dev->tx_idle)))
                                mtp_request_free(req, dev->ep_in);
                        mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
                        mtp_tx_reqs = MTP_TX_REQ_MAX;
                        goto retry_tx_alloc;
                }
                req->complete = mtp_complete_in;
                mtp_req_put(dev, &dev->tx_idle, req);
        }

        /*
         * The RX buffer should be aligned to EP max packet for
         * some controllers.  At bind time, we don't know the
         * operational speed.  Hence assuming super speed max
         * packet size.
         */
        if (mtp_rx_req_len % 1024)
                mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;

retry_rx_alloc:
        for (i = 0; i < RX_REQ_MAX; i++) {
                req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
                if (!req) {
                        if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
                                goto fail;
                        /* free the requests allocated so far this pass */
                        for (--i; i >= 0; i--)
                                mtp_request_free(dev->rx_req[i], dev->ep_out);
                        mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
                        goto retry_rx_alloc;
                }
                req->complete = mtp_complete_out;
                dev->rx_req[i] = req;
        }
        for (i = 0; i < INTR_REQ_MAX; i++) {
                req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
                if (!req)
                        goto fail;
                req->complete = mtp_complete_intr;
                mtp_req_put(dev, &dev->intr_idle, req);
        }

        return 0;

fail:
        /* NOTE(review): tx/intr requests already queued on the idle lists
         * are not freed here — presumably cleaned up by the unbind path;
         * confirm before relying on repeated bind attempts.
         */
        pr_err("mtp_bind() could not allocate requests\n");
        return -1;
}
611
/*
 * read() handler for the mtp_usb char device: receive one MTP packet
 * from the host on the bulk OUT endpoint and copy it to userspace.
 *
 * Blocks until the function is online, then until the transfer completes,
 * the host cancels (-ECANCELED), or the wait is interrupted. Returns the
 * number of bytes copied, -EINVAL if @count rounded up to a maxpacket
 * multiple exceeds the rx buffer, or a negative errno.
 *
 * read_mutex guards use of rx_req[0] — presumably against teardown
 * freeing the requests concurrently; confirm against the unbind/disable
 * path (not visible in this chunk).
 */
static ssize_t mtp_read(struct file *fp, char __user *buf,
        size_t count, loff_t *pos)
{
        struct mtp_dev *dev = fp->private_data;
        struct usb_composite_dev *cdev = dev->cdev;
        struct usb_request *req;
        ssize_t r = count, xfer, len;
        int ret = 0;

        DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);

        /* we will block until we're online */
        DBG(cdev, "mtp_read: waiting for online state\n");
        ret = wait_event_interruptible(dev->read_wq,
                dev->state != STATE_OFFLINE);
        if (ret < 0) {
                r = ret;
                goto done;
        }

        /* OUT transfers must be queued in maxpacket multiples */
        len = ALIGN(count, dev->ep_out->maxpacket);
        if (len > mtp_rx_req_len)
                return -EINVAL;

        spin_lock_irq(&dev->lock);
        if (dev->state == STATE_CANCELED) {
                /* report cancelation to userspace */
                dev->state = STATE_READY;
                spin_unlock_irq(&dev->lock);
                return -ECANCELED;
        }
        dev->state = STATE_BUSY;
        spin_unlock_irq(&dev->lock);

        mutex_lock(&dev->read_mutex);
        if (dev->state == STATE_OFFLINE) {
                r = -EIO;
                mutex_unlock(&dev->read_mutex);
                goto done;
        }
requeue_req:
        /* queue a request */
        req = dev->rx_req[0];
        req->length = len;
        dev->rx_done = 0;
        mutex_unlock(&dev->read_mutex);
        ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
        if (ret < 0) {
                r = -EIO;
                goto done;
        } else {
                DBG(cdev, "rx %pK queue\n", req);
        }

        /* wait for a request to complete */
        ret = wait_event_interruptible(dev->read_wq,
                                dev->rx_done || dev->state != STATE_BUSY);
        if (dev->state == STATE_CANCELED) {
                r = -ECANCELED;
                /* pull the request back if the completion never fired */
                if (!dev->rx_done)
                        usb_ep_dequeue(dev->ep_out, req);
                spin_lock_irq(&dev->lock);
                dev->state = STATE_CANCELED;
                spin_unlock_irq(&dev->lock);
                goto done;
        }
        if (ret < 0) {
                /* interrupted by a signal */
                r = ret;
                usb_ep_dequeue(dev->ep_out, req);
                goto done;
        }
        mutex_lock(&dev->read_mutex);
        if (dev->state == STATE_BUSY) {
                /* If we got a 0-len packet, throw it back and try again. */
                if (req->actual == 0)
                        goto requeue_req;

                DBG(cdev, "rx %pK %d\n", req, req->actual);
                xfer = (req->actual < count) ? req->actual : count;
                r = xfer;
                if (copy_to_user(buf, req->buf, xfer))
                        r = -EFAULT;
        } else
                r = -EIO;

        mutex_unlock(&dev->read_mutex);
done:
        /* report CANCELED once, otherwise return to READY unless offline */
        spin_lock_irq(&dev->lock);
        if (dev->state == STATE_CANCELED)
                r = -ECANCELED;
        else if (dev->state != STATE_OFFLINE)
                dev->state = STATE_READY;
        spin_unlock_irq(&dev->lock);

        DBG(cdev, "mtp_read returning %zd state:%d\n", r, dev->state);
        return r;
}
709
710 static ssize_t mtp_write(struct file *fp, const char __user *buf,
711         size_t count, loff_t *pos)
712 {
713         struct mtp_dev *dev = fp->private_data;
714         struct usb_composite_dev *cdev = dev->cdev;
715         struct usb_request *req = 0;
716         ssize_t r = count;
717         unsigned xfer;
718         int sendZLP = 0;
719         int ret;
720
721         DBG(cdev, "mtp_write(%zu) state:%d\n", count, dev->state);
722
723         spin_lock_irq(&dev->lock);
724         if (dev->state == STATE_CANCELED) {
725                 /* report cancelation to userspace */
726                 dev->state = STATE_READY;
727                 spin_unlock_irq(&dev->lock);
728                 return -ECANCELED;
729         }
730         if (dev->state == STATE_OFFLINE) {
731                 spin_unlock_irq(&dev->lock);
732                 return -ENODEV;
733         }
734         dev->state = STATE_BUSY;
735         spin_unlock_irq(&dev->lock);
736
737         /* we need to send a zero length packet to signal the end of transfer
738          * if the transfer size is aligned to a packet boundary.
739          */
740         if ((count & (dev->ep_in->maxpacket - 1)) == 0)
741                 sendZLP = 1;
742
743         while (count > 0 || sendZLP) {
744                 /* so we exit after sending ZLP */
745                 if (count == 0)
746                         sendZLP = 0;
747
748                 if (dev->state != STATE_BUSY) {
749                         DBG(cdev, "mtp_write dev->error\n");
750                         r = -EIO;
751                         break;
752                 }
753
754                 /* get an idle tx request to use */
755                 req = 0;
756                 ret = wait_event_interruptible(dev->write_wq,
757                         ((req = mtp_req_get(dev, &dev->tx_idle))
758                                 || dev->state != STATE_BUSY));
759                 if (!req) {
760                         DBG(cdev, "mtp_write request NULL ret:%d state:%d\n",
761                                 ret, dev->state);
762                         r = ret;
763                         break;
764                 }
765
766                 if (count > mtp_tx_req_len)
767                         xfer = mtp_tx_req_len;
768                 else
769                         xfer = count;
770                 if (xfer && copy_from_user(req->buf, buf, xfer)) {
771                         r = -EFAULT;
772                         break;
773                 }
774
775                 req->length = xfer;
776                 ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
777                 if (ret < 0) {
778                         DBG(cdev, "mtp_write: xfer error %d\n", ret);
779                         r = -EIO;
780                         break;
781                 }
782
783                 buf += xfer;
784                 count -= xfer;
785
786                 /* zero this so we don't try to free it on error exit */
787                 req = 0;
788         }
789
790         if (req)
791                 mtp_req_put(dev, &dev->tx_idle, req);
792
793         spin_lock_irq(&dev->lock);
794         if (dev->state == STATE_CANCELED)
795                 r = -ECANCELED;
796         else if (dev->state != STATE_OFFLINE)
797                 dev->state = STATE_READY;
798         spin_unlock_irq(&dev->lock);
799
800         DBG(cdev, "mtp_write returning %zd state:%d\n", r, dev->state);
801         return r;
802 }
803
804 /* read from a local file and write to USB */
805 static void send_file_work(struct work_struct *data)
806 {
807         struct mtp_dev *dev = container_of(data, struct mtp_dev,
808                                                 send_file_work);
809         struct usb_composite_dev *cdev = dev->cdev;
810         struct usb_request *req = 0;
811         struct mtp_data_header *header;
812         struct file *filp;
813         loff_t offset;
814         int64_t count;
815         int xfer, ret, hdr_size;
816         int r = 0;
817         int sendZLP = 0;
818         ktime_t start_time;
819
820         /* read our parameters */
821         smp_rmb();
822         filp = dev->xfer_file;
823         offset = dev->xfer_file_offset;
824         count = dev->xfer_file_length;
825
826         DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
827
828         if (dev->xfer_send_header) {
829                 hdr_size = sizeof(struct mtp_data_header);
830                 count += hdr_size;
831         } else {
832                 hdr_size = 0;
833         }
834
835         /* we need to send a zero length packet to signal the end of transfer
836          * if the transfer size is aligned to a packet boundary.
837          */
838         if ((count & (dev->ep_in->maxpacket - 1)) == 0)
839                 sendZLP = 1;
840
841         while (count > 0 || sendZLP) {
842                 /* so we exit after sending ZLP */
843                 if (count == 0)
844                         sendZLP = 0;
845
846                 /* get an idle tx request to use */
847                 req = 0;
848                 ret = wait_event_interruptible(dev->write_wq,
849                         (req = mtp_req_get(dev, &dev->tx_idle))
850                         || dev->state != STATE_BUSY);
851                 if (dev->state == STATE_CANCELED) {
852                         r = -ECANCELED;
853                         break;
854                 }
855                 if (!req) {
856                         DBG(cdev,
857                                 "send_file_work request NULL ret:%d state:%d\n",
858                                 ret, dev->state);
859                         r = ret;
860                         break;
861                 }
862
863                 if (count > mtp_tx_req_len)
864                         xfer = mtp_tx_req_len;
865                 else
866                         xfer = count;
867
868                 if (hdr_size) {
869                         /* prepend MTP data header */
870                         header = (struct mtp_data_header *)req->buf;
871                         /*
872                          * set file size with header according to
873                          * MTP Specification v1.0
874                          */
875                         header->length = (count > MTP_MAX_FILE_SIZE) ?
876                                 MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
877                         header->type = __cpu_to_le16(2); /* data packet */
878                         header->command = __cpu_to_le16(dev->xfer_command);
879                         header->transaction_id =
880                                         __cpu_to_le32(dev->xfer_transaction_id);
881                 }
882                 start_time = ktime_get();
883                 ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
884                                                                 &offset);
885                 if (ret < 0) {
886                         r = ret;
887                         break;
888                 }
889
890                 xfer = ret + hdr_size;
891                 dev->perf[dev->dbg_read_index].vfs_rtime =
892                         ktime_to_us(ktime_sub(ktime_get(), start_time));
893                 dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
894                 dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
895                 hdr_size = 0;
896
897                 req->length = xfer;
898                 ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
899                 if (ret < 0) {
900                         DBG(cdev, "send_file_work: xfer error %d\n", ret);
901                         if (dev->state != STATE_OFFLINE)
902                                 dev->state = STATE_ERROR;
903                         r = -EIO;
904                         break;
905                 }
906
907                 count -= xfer;
908
909                 /* zero this so we don't try to free it on error exit */
910                 req = 0;
911         }
912
913         if (req)
914                 mtp_req_put(dev, &dev->tx_idle, req);
915
916         DBG(cdev, "send_file_work returning %d state:%d\n", r, dev->state);
917         /* write the result */
918         dev->xfer_result = r;
919         smp_wmb();
920 }
921
/* read from USB and write to a local file
 *
 * Workqueue handler for MTP_RECEIVE_FILE.  Runs in process context so
 * vfs_write() can use the request buffers directly.  The loop is
 * double-buffered: while the data just received in write_req is being
 * written to the file, the next USB OUT request (read_req, rotating
 * through dev->rx_req[]) is already queued on the wire.
 *
 * dev->read_mutex serializes against mtp_function_unbind(), which frees
 * the rx requests under the same mutex; every access to a request is
 * therefore bracketed by the mutex plus a STATE_OFFLINE re-check.
 *
 * Parameters come in through dev->xfer_file / xfer_file_offset /
 * xfer_file_length (written by the ioctl path before queue_work());
 * the result goes back through dev->xfer_result.  smp_rmb()/smp_wmb()
 * pair with the barriers in mtp_send_receive_ioctl().
 */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;
	ktime_t start_time;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);
	if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
		DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
						count, dev->ep_out->maxpacket);

	while (count > 0 || write_req) {
		if (count > 0) {
			mutex_lock(&dev->read_mutex);
			if (dev->state == STATE_OFFLINE) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				break;
			}
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			/* some h/w expects size to be aligned to ep's MTU */
			read_req->length = mtp_rx_req_len;

			dev->rx_done = 0;
			mutex_unlock(&dev->read_mutex);
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			/* flush the previously received buffer to the file
			 * while the next USB read is in flight
			 */
			DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
			start_time = ktime_get();
			mutex_lock(&dev->read_mutex);
			if (dev->state == STATE_OFFLINE) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				break;
			}
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				/* short write: treat as I/O error */
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				if (dev->state != STATE_OFFLINE)
					dev->state = STATE_ERROR;
				break;
			}
			mutex_unlock(&dev->read_mutex);
			/* record write timing for the debugfs stats */
			dev->perf[dev->dbg_write_index].vfs_wtime =
				ktime_to_us(ktime_sub(ktime_get(), start_time));
			dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
			dev->dbg_write_index =
				(dev->dbg_write_index + 1) % MAX_ITERATION;
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED
					|| dev->state == STATE_OFFLINE) {
				if (dev->state == STATE_OFFLINE)
					r = -EIO;
				else
					r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}

			mutex_lock(&dev->read_mutex);
			if (dev->state == STATE_OFFLINE) {
				r = -EIO;
				mutex_unlock(&dev->read_mutex);
				break;
			}
			/* Check if we aligned the size due to MTU constraint */
			if (count < read_req->length)
				read_req->actual = (read_req->actual > count ?
						count : read_req->actual);
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			/* hand the filled buffer to the write phase */
			write_req = read_req;
			read_req = NULL;
			mutex_unlock(&dev->read_mutex);
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
1051
1052 static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
1053 {
1054         struct usb_request *req = NULL;
1055         int ret;
1056         int length = event->length;
1057
1058         DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
1059
1060         if (length < 0 || length > INTR_BUFFER_SIZE)
1061                 return -EINVAL;
1062         if (dev->state == STATE_OFFLINE)
1063                 return -ENODEV;
1064
1065         ret = wait_event_interruptible_timeout(dev->intr_wq,
1066                         (req = mtp_req_get(dev, &dev->intr_idle)),
1067                         msecs_to_jiffies(1000));
1068         if (!req)
1069                 return -ETIME;
1070
1071         if (copy_from_user(req->buf, (void __user *)event->data, length)) {
1072                 mtp_req_put(dev, &dev->intr_idle, req);
1073                 return -EFAULT;
1074         }
1075         req->length = length;
1076         ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
1077         if (ret)
1078                 mtp_req_put(dev, &dev->intr_idle, req);
1079
1080         return ret;
1081 }
1082
/*
 * Common backend for the MTP_SEND_FILE / MTP_RECEIVE_FILE /
 * MTP_SEND_FILE_WITH_HEADER ioctls.
 *
 * Moves the device through READY -> BUSY, publishes the transfer
 * parameters (file, offset, length) to the worker via dev->xfer_*,
 * kicks the appropriate work item and blocks until it finishes
 * (flush_workqueue), then collects dev->xfer_result.
 *
 * smp_wmb()/smp_rmb() here pair with the smp_rmb()/smp_wmb() in
 * send_file_work()/receive_file_work().
 *
 * Returns the worker's result, -EBUSY if another ioctl is in flight,
 * -ECANCELED if a host cancel arrived, -ENODEV when offline, or
 * -EBADF for a bad file descriptor.
 */
static long mtp_send_receive_ioctl(struct file *fp, unsigned code,
	struct mtp_file_range *mfr)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	struct work_struct *work;
	int ret = -EINVAL;

	/* only one transfer ioctl may run at a time */
	if (mtp_lock(&dev->ioctl_excl)) {
		DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
		return -EBUSY;
	}

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancellation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		ret = -ECANCELED;
		goto out;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		ret = -ENODEV;
		goto out;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* hold a reference to the file while we are working with it */
	filp = fget(mfr->fd);
	if (!filp) {
		ret = -EBADF;
		goto fail;
	}

	/* write the parameters */
	dev->xfer_file = filp;
	dev->xfer_file_offset = mfr->offset;
	dev->xfer_file_length = mfr->length;
	/* make sure write is done before parameters are read */
	smp_wmb();

	/* pick the worker: send (with or without data header) or receive */
	if (code == MTP_SEND_FILE_WITH_HEADER) {
		work = &dev->send_file_work;
		dev->xfer_send_header = 1;
		dev->xfer_command = mfr->command;
		dev->xfer_transaction_id = mfr->transaction_id;
	} else if (code == MTP_SEND_FILE) {
		work = &dev->send_file_work;
		dev->xfer_send_header = 0;
	} else {
		work = &dev->receive_file_work;
	}

	/* We do the file transfer on a work queue so it will run
	 * in kernel context, which is necessary for vfs_read and
	 * vfs_write to use our buffers in the kernel address space.
	 */
	queue_work(dev->wq, work);
	/* wait for operation to complete */
	flush_workqueue(dev->wq);
	fput(filp);

	/* read the result */
	smp_rmb();
	ret = dev->xfer_result;

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d state:%d\n", ret, dev->state);
	return ret;
}
1163
1164 static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
1165 {
1166         struct mtp_dev *dev = fp->private_data;
1167         struct mtp_file_range   mfr;
1168         struct mtp_event        event;
1169         int ret = -EINVAL;
1170
1171         switch (code) {
1172         case MTP_SEND_FILE:
1173         case MTP_RECEIVE_FILE:
1174         case MTP_SEND_FILE_WITH_HEADER:
1175                 if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
1176                         ret = -EFAULT;
1177                         goto fail;
1178                 }
1179                 ret = mtp_send_receive_ioctl(fp, code, &mfr);
1180         break;
1181         case MTP_SEND_EVENT:
1182                 if (mtp_lock(&dev->ioctl_excl))
1183                         return -EBUSY;
1184                 /* return here so we don't change dev->state below,
1185                  * which would interfere with bulk transfer state.
1186                  */
1187                 if (copy_from_user(&event, (void __user *)value, sizeof(event)))
1188                         ret = -EFAULT;
1189                 else
1190                         ret = mtp_send_event(dev, &event);
1191                 mtp_unlock(&dev->ioctl_excl);
1192         break;
1193         default:
1194                 DBG(dev->cdev, "unknown ioctl code: %d\n", code);
1195         }
1196 fail:
1197         return ret;
1198 }
1199
1200 /*
1201  * 32 bit userspace calling into 64 bit kernel. handle ioctl code
1202  * and userspace pointer
1203 */
1204 #ifdef CONFIG_COMPAT
1205 static long compat_mtp_ioctl(struct file *fp, unsigned code,
1206         unsigned long value)
1207 {
1208         struct mtp_dev *dev = fp->private_data;
1209         struct mtp_file_range   mfr;
1210         struct __compat_mtp_file_range  cmfr;
1211         struct mtp_event        event;
1212         struct __compat_mtp_event cevent;
1213         unsigned cmd;
1214         bool send_file = false;
1215         int ret = -EINVAL;
1216
1217         switch (code) {
1218         case COMPAT_MTP_SEND_FILE:
1219                 cmd = MTP_SEND_FILE;
1220                 send_file = true;
1221                 break;
1222         case COMPAT_MTP_RECEIVE_FILE:
1223                 cmd = MTP_RECEIVE_FILE;
1224                 send_file = true;
1225                 break;
1226         case COMPAT_MTP_SEND_FILE_WITH_HEADER:
1227                 cmd = MTP_SEND_FILE_WITH_HEADER;
1228                 send_file = true;
1229                 break;
1230         case COMPAT_MTP_SEND_EVENT:
1231                 cmd = MTP_SEND_EVENT;
1232                 break;
1233         default:
1234                 DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
1235                 ret = -ENOIOCTLCMD;
1236                 goto fail;
1237         }
1238
1239         if (send_file) {
1240                 if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
1241                         ret = -EFAULT;
1242                         goto fail;
1243                 }
1244                 mfr.fd = cmfr.fd;
1245                 mfr.offset = cmfr.offset;
1246                 mfr.length = cmfr.length;
1247                 mfr.command = cmfr.command;
1248                 mfr.transaction_id = cmfr.transaction_id;
1249                 ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
1250         } else {
1251                 if (mtp_lock(&dev->ioctl_excl))
1252                         return -EBUSY;
1253                 /* return here so we don't change dev->state below,
1254                  * which would interfere with bulk transfer state.
1255                  */
1256                 if (copy_from_user(&cevent, (void __user *)value,
1257                         sizeof(cevent))) {
1258                         ret = -EFAULT;
1259                         goto fail;
1260                 }
1261                 event.length = cevent.length;
1262                 event.data = compat_ptr(cevent.data);
1263                 ret = mtp_send_event(dev, &event);
1264                 mtp_unlock(&dev->ioctl_excl);
1265         }
1266 fail:
1267         return ret;
1268 }
1269 #endif
1270
1271 static int mtp_open(struct inode *ip, struct file *fp)
1272 {
1273         printk(KERN_INFO "mtp_open\n");
1274         if (mtp_lock(&_mtp_dev->open_excl)) {
1275                 pr_err("%s mtp_release not called returning EBUSY\n", __func__);
1276                 return -EBUSY;
1277         }
1278
1279         /* clear any error condition */
1280         if (_mtp_dev->state != STATE_OFFLINE)
1281                 _mtp_dev->state = STATE_READY;
1282
1283         fp->private_data = _mtp_dev;
1284         return 0;
1285 }
1286
1287 static int mtp_release(struct inode *ip, struct file *fp)
1288 {
1289         printk(KERN_INFO "mtp_release\n");
1290
1291         mtp_unlock(&_mtp_dev->open_excl);
1292         return 0;
1293 }
1294
/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
#ifdef CONFIG_COMPAT
	/* 32-bit userspace on a 64-bit kernel goes through the compat shim */
	.compat_ioctl = compat_mtp_ioctl,
#endif
	.open = mtp_open,
	.release = mtp_release,
};

/* misc char device exposing the MTP function to userspace
 * with a dynamically assigned minor number
 */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
1313
/*
 * Handle ep0 control requests addressed to the MTP function:
 *  - GET_DESCRIPTOR for the Microsoft OS string descriptor,
 *  - the vendor request for the MS OS extended configuration descriptor
 *    (MTP or PTP flavor depending on dev->is_ptp),
 *  - the MTP class requests MTP_REQ_CANCEL and MTP_REQ_GET_DEVICE_STATUS.
 *
 * Returns the number of bytes queued on ep0 (>= 0) after scheduling the
 * data/status phase, or -EOPNOTSUPP when the request is not ours.
 */
static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = _mtp_dev;
	int	value = -EOPNOTSUPP;
	u16	w_index = le16_to_cpu(ctrl->wIndex);
	u16	w_value = le16_to_cpu(ctrl->wValue);
	u16	w_length = le16_to_cpu(ctrl->wLength);
	unsigned long	flags;

	VDBG(cdev, "mtp_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string */
	if (ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		/* never copy more than the host asked for */
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			/* pick the MTP or PTP extended config descriptor */
			if (!dev->is_ptp) {
				value = (w_length <
						sizeof(mtp_ext_config_desc) ?
						w_length :
						sizeof(mtp_ext_config_desc));
				memcpy(cdev->req->buf, &mtp_ext_config_desc,
									value);
			} else {
				value = (w_length <
						sizeof(ptp_ext_config_desc) ?
						w_length :
						sizeof(ptp_ext_config_desc));
				memcpy(cdev->req->buf, &ptp_ext_config_desc,
									value);
			}
		}
	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			/* cancel only interrupts an in-flight transfer;
			 * the blocked workers are woken to observe it
			 */
			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 *  bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;

			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;

		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s: response queue error\n", __func__);
	}
	return value;
}
1418
1419 static int
1420 mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
1421 {
1422         struct usb_composite_dev *cdev = c->cdev;
1423         struct mtp_dev  *dev = func_to_mtp(f);
1424         int                     id;
1425         int                     ret;
1426         struct mtp_instance *fi_mtp;
1427
1428         dev->cdev = cdev;
1429         DBG(cdev, "mtp_function_bind dev: %pK\n", dev);
1430
1431         /* allocate interface ID(s) */
1432         id = usb_interface_id(c, f);
1433         if (id < 0)
1434                 return id;
1435         mtp_interface_desc.bInterfaceNumber = id;
1436
1437         if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
1438                 ret = usb_string_id(c->cdev);
1439                 if (ret < 0)
1440                         return ret;
1441                 mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
1442                 mtp_interface_desc.iInterface = ret;
1443         }
1444
1445         fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
1446
1447         if (cdev->use_os_string) {
1448                 f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
1449                                         GFP_KERNEL);
1450                 if (!f->os_desc_table)
1451                         return -ENOMEM;
1452                 f->os_desc_n = 1;
1453                 f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
1454         }
1455
1456         /* allocate endpoints */
1457         ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
1458                         &mtp_fullspeed_out_desc, &mtp_intr_desc);
1459         if (ret)
1460                 return ret;
1461
1462         /* support high speed hardware */
1463         if (gadget_is_dualspeed(c->cdev->gadget)) {
1464                 mtp_highspeed_in_desc.bEndpointAddress =
1465                         mtp_fullspeed_in_desc.bEndpointAddress;
1466                 mtp_highspeed_out_desc.bEndpointAddress =
1467                         mtp_fullspeed_out_desc.bEndpointAddress;
1468         }
1469         /* support super speed hardware */
1470         if (gadget_is_superspeed(c->cdev->gadget)) {
1471                 unsigned max_burst;
1472
1473                 /* Calculate bMaxBurst, we know packet size is 1024 */
1474                 max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
1475                 mtp_ss_in_desc.bEndpointAddress =
1476                         mtp_fullspeed_in_desc.bEndpointAddress;
1477                 mtp_ss_in_comp_desc.bMaxBurst = max_burst;
1478                 mtp_ss_out_desc.bEndpointAddress =
1479                         mtp_fullspeed_out_desc.bEndpointAddress;
1480                 mtp_ss_out_comp_desc.bMaxBurst = max_burst;
1481         }
1482
1483         /* support super speed hardware */
1484         if (gadget_is_superspeed(c->cdev->gadget)) {
1485                 mtp_ss_in_desc.bEndpointAddress =
1486                         mtp_fullspeed_in_desc.bEndpointAddress;
1487                 mtp_ss_out_desc.bEndpointAddress =
1488                         mtp_fullspeed_out_desc.bEndpointAddress;
1489         }
1490
1491         fi_mtp->func_inst.f = &dev->function;
1492         DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
1493                 gadget_is_superspeed(c->cdev->gadget) ? "super" :
1494                 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
1495                 f->name, dev->ep_in->name, dev->ep_out->name);
1496         return 0;
1497 }
1498
/*
 * Unbind the MTP function: free every USB request and the OS descriptor
 * table, and mark the device offline.
 *
 * Request freeing is done under dev->read_mutex, which pairs with the
 * locking in receive_file_work()/mtp_function bind paths so a worker
 * never touches a request that is being freed here.
 */
static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct mtp_dev	*dev = func_to_mtp(f);
	struct mtp_instance *fi_mtp;
	struct usb_request *req;
	int i;
	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
	/* allow the string ID to be re-allocated on the next bind */
	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
	mutex_lock(&dev->read_mutex);
	/* drain and free TX, RX and interrupt requests */
	while ((req = mtp_req_get(dev, &dev->tx_idle)))
		mtp_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		mtp_request_free(dev->rx_req[i], dev->ep_out);
	while ((req = mtp_req_get(dev, &dev->intr_idle)))
		mtp_request_free(req, dev->ep_intr);
	mutex_unlock(&dev->read_mutex);
	dev->state = STATE_OFFLINE;
	dev->is_ptp = false;
	kfree(f->os_desc_table);
	f->os_desc_n = 0;
	fi_mtp->func_inst.f = NULL;
}
1522
1523 static int mtp_function_set_alt(struct usb_function *f,
1524                 unsigned intf, unsigned alt)
1525 {
1526         struct mtp_dev  *dev = func_to_mtp(f);
1527         struct usb_composite_dev *cdev = f->config->cdev;
1528         int ret;
1529
1530         DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1531
1532         ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
1533         if (ret)
1534                 return ret;
1535
1536         ret = usb_ep_enable(dev->ep_in);
1537         if (ret)
1538                 return ret;
1539
1540         ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
1541         if (ret)
1542                 return ret;
1543
1544         ret = usb_ep_enable(dev->ep_out);
1545         if (ret) {
1546                 usb_ep_disable(dev->ep_in);
1547                 return ret;
1548         }
1549
1550         ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
1551         if (ret)
1552                 return ret;
1553
1554         ret = usb_ep_enable(dev->ep_intr);
1555         if (ret) {
1556                 usb_ep_disable(dev->ep_out);
1557                 usb_ep_disable(dev->ep_in);
1558                 return ret;
1559         }
1560         dev->state = STATE_READY;
1561
1562         /* readers may be blocked waiting for us to go online */
1563         wake_up(&dev->read_wq);
1564         return 0;
1565 }
1566
1567 static void mtp_function_disable(struct usb_function *f)
1568 {
1569         struct mtp_dev  *dev = func_to_mtp(f);
1570         struct usb_composite_dev        *cdev = dev->cdev;
1571
1572         DBG(cdev, "mtp_function_disable\n");
1573         dev->state = STATE_OFFLINE;
1574         usb_ep_disable(dev->ep_in);
1575         usb_ep_disable(dev->ep_out);
1576         usb_ep_disable(dev->ep_intr);
1577
1578         /* readers may be blocked waiting for us to go online */
1579         wake_up(&dev->read_wq);
1580
1581         VDBG(cdev, "%s disabled\n", dev->function.name);
1582 }
1583
1584 static int debug_mtp_read_stats(struct seq_file *s, void *unused)
1585 {
1586         struct mtp_dev *dev = _mtp_dev;
1587         int i;
1588         unsigned long flags;
1589         unsigned min, max = 0, sum = 0, iteration = 0;
1590
1591         seq_puts(s, "\n=======================\n");
1592         seq_puts(s, "MTP Write Stats:\n");
1593         seq_puts(s, "\n=======================\n");
1594         spin_lock_irqsave(&dev->lock, flags);
1595         min = dev->perf[0].vfs_wtime;
1596         for (i = 0; i < MAX_ITERATION; i++) {
1597                 seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
1598                                 dev->perf[i].vfs_wbytes,
1599                                 dev->perf[i].vfs_wtime);
1600                 if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
1601                         sum += dev->perf[i].vfs_wtime;
1602                         if (min > dev->perf[i].vfs_wtime)
1603                                 min = dev->perf[i].vfs_wtime;
1604                         if (max < dev->perf[i].vfs_wtime)
1605                                 max = dev->perf[i].vfs_wtime;
1606                         iteration++;
1607                 }
1608         }
1609
1610         seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
1611                                                 min, max, sum / iteration);
1612         min = max = sum = iteration = 0;
1613         seq_puts(s, "\n=======================\n");
1614         seq_puts(s, "MTP Read Stats:\n");
1615         seq_puts(s, "\n=======================\n");
1616
1617         min = dev->perf[0].vfs_rtime;
1618         for (i = 0; i < MAX_ITERATION; i++) {
1619                 seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
1620                                 dev->perf[i].vfs_rbytes,
1621                                 dev->perf[i].vfs_rtime);
1622                 if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
1623                         sum += dev->perf[i].vfs_rtime;
1624                         if (min > dev->perf[i].vfs_rtime)
1625                                 min = dev->perf[i].vfs_rtime;
1626                         if (max < dev->perf[i].vfs_rtime)
1627                                 max = dev->perf[i].vfs_rtime;
1628                         iteration++;
1629                 }
1630         }
1631
1632         seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
1633                                                 min, max, sum / iteration);
1634         spin_unlock_irqrestore(&dev->lock, flags);
1635         return 0;
1636 }
1637
1638 static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
1639                                  size_t count, loff_t *ppos)
1640 {
1641         int clear_stats;
1642         unsigned long flags;
1643         struct mtp_dev *dev = _mtp_dev;
1644
1645         if (buf == NULL) {
1646                 pr_err("[%s] EINVAL\n", __func__);
1647                 goto done;
1648         }
1649
1650         if (kstrtoint(buf, 0, &clear_stats) || clear_stats != 0) {
1651                 pr_err("Wrong value. To clear stats, enter value as 0.\n");
1652                 goto done;
1653         }
1654
1655         spin_lock_irqsave(&dev->lock, flags);
1656         memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
1657         dev->dbg_read_index = 0;
1658         dev->dbg_write_index = 0;
1659         spin_unlock_irqrestore(&dev->lock, flags);
1660 done:
1661         return count;
1662 }
1663
/* debugfs open: bind the seq_file single-shot show to debug_mtp_read_stats */
static int debug_mtp_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_mtp_read_stats, inode->i_private);
}
1668
1669 static const struct file_operations debug_mtp_ops = {
1670         .open = debug_mtp_open,
1671         .read = seq_read,
1672         .write = debug_mtp_reset_stats,
1673 };
1674
1675 struct dentry *dent_mtp;
1676 static void mtp_debugfs_init(void)
1677 {
1678         struct dentry *dent_mtp_status;
1679
1680         dent_mtp = debugfs_create_dir("usb_mtp", 0);
1681         if (!dent_mtp || IS_ERR(dent_mtp))
1682                 return;
1683
1684         dent_mtp_status = debugfs_create_file("status", S_IRUGO | S_IWUSR,
1685                                         dent_mtp, 0, &debug_mtp_ops);
1686         if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
1687                 debugfs_remove(dent_mtp);
1688                 dent_mtp = NULL;
1689                 return;
1690         }
1691 }
1692
1693 static void mtp_debugfs_remove(void)
1694 {
1695         debugfs_remove_recursive(dent_mtp);
1696 }
1697
1698 static int __mtp_setup(struct mtp_instance *fi_mtp)
1699 {
1700         struct mtp_dev *dev;
1701         int ret;
1702
1703         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1704
1705         if (fi_mtp != NULL)
1706                 fi_mtp->dev = dev;
1707
1708         if (!dev)
1709                 return -ENOMEM;
1710
1711         spin_lock_init(&dev->lock);
1712         init_waitqueue_head(&dev->read_wq);
1713         init_waitqueue_head(&dev->write_wq);
1714         init_waitqueue_head(&dev->intr_wq);
1715         atomic_set(&dev->open_excl, 0);
1716         atomic_set(&dev->ioctl_excl, 0);
1717         INIT_LIST_HEAD(&dev->tx_idle);
1718         INIT_LIST_HEAD(&dev->intr_idle);
1719
1720         dev->wq = create_singlethread_workqueue("f_mtp");
1721         if (!dev->wq) {
1722                 ret = -ENOMEM;
1723                 goto err1;
1724         }
1725         INIT_WORK(&dev->send_file_work, send_file_work);
1726         INIT_WORK(&dev->receive_file_work, receive_file_work);
1727
1728         _mtp_dev = dev;
1729
1730         ret = misc_register(&mtp_device);
1731         if (ret)
1732                 goto err2;
1733
1734         mtp_debugfs_init();
1735         return 0;
1736
1737 err2:
1738         destroy_workqueue(dev->wq);
1739 err1:
1740         _mtp_dev = NULL;
1741         kfree(dev);
1742         printk(KERN_ERR "mtp gadget driver failed to initialize\n");
1743         return ret;
1744 }
1745
/* configfs entry point for device setup; thin wrapper around __mtp_setup() */
static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
{
	return __mtp_setup(fi_mtp);
}
1750
1751
/*
 * Undo __mtp_setup() in reverse order: debugfs tree, misc device,
 * workqueue, then the mtp_dev itself.  Safe to call when setup never
 * ran or already failed (_mtp_dev == NULL).
 */
static void mtp_cleanup(void)
{
	struct mtp_dev *dev = _mtp_dev;

	if (!dev)
		return;

	mtp_debugfs_remove();
	misc_deregister(&mtp_device);
	destroy_workqueue(dev->wq);
	_mtp_dev = NULL;
	kfree(dev);
}
1765
/* map a configfs item back to its enclosing mtp_instance */
static struct mtp_instance *to_mtp_instance(struct config_item *item)
{
	return container_of(to_config_group(item), struct mtp_instance,
		func_inst.group);
}
1771
/* configfs item release: drop the reference held on the function instance */
static void mtp_attr_release(struct config_item *item)
{
	struct mtp_instance *fi_mtp = to_mtp_instance(item);

	usb_put_function_instance(&fi_mtp->func_inst);
}
1778
/* configfs item operations: only release is needed */
static struct configfs_item_operations mtp_item_ops = {
	.release	= mtp_attr_release,
};

/* configfs group type describing an MTP function instance directory */
static struct config_item_type mtp_func_type = {
	.ct_item_ops	= &mtp_item_ops,
	.ct_owner	= THIS_MODULE,
};
1787
1788
/* map a generic usb_function_instance back to its mtp_instance wrapper */
static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
{
	return container_of(fi, struct mtp_instance, func_inst);
}
1793
1794 static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
1795 {
1796         struct mtp_instance *fi_mtp;
1797         char *ptr;
1798         int name_len;
1799
1800         name_len = strlen(name) + 1;
1801         if (name_len > MAX_INST_NAME_LEN)
1802                 return -ENAMETOOLONG;
1803
1804         ptr = kstrndup(name, name_len, GFP_KERNEL);
1805         if (!ptr)
1806                 return -ENOMEM;
1807
1808         fi_mtp = to_fi_mtp(fi);
1809         fi_mtp->name = ptr;
1810
1811         return 0;
1812 }
1813
/*
 * usb_function_instance->free_func_inst callback: release everything
 * alloc_inst_mtp_ptp() created — the instance name, the global mtp_dev
 * (via mtp_cleanup()), the default_groups array allocated by
 * usb_os_desc_prepare_interf_dir(), and finally the instance itself.
 */
static void mtp_free_inst(struct usb_function_instance *fi)
{
	struct mtp_instance *fi_mtp;

	fi_mtp = to_fi_mtp(fi);
	kfree(fi_mtp->name);
	mtp_cleanup();
	kfree(fi_mtp->mtp_os_desc.group.default_groups);
	kfree(fi_mtp);
}
1824
1825 struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
1826 {
1827         struct mtp_instance *fi_mtp;
1828         int ret = 0;
1829         struct usb_os_desc *descs[1];
1830         char *names[1];
1831
1832         fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
1833         if (!fi_mtp)
1834                 return ERR_PTR(-ENOMEM);
1835         fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
1836         fi_mtp->func_inst.free_func_inst = mtp_free_inst;
1837
1838         fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
1839         INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
1840         descs[0] = &fi_mtp->mtp_os_desc;
1841         names[0] = "MTP";
1842         usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
1843                                         descs, names, THIS_MODULE);
1844
1845         if (mtp_config) {
1846                 ret = mtp_setup_configfs(fi_mtp);
1847                 if (ret) {
1848                         kfree(fi_mtp);
1849                         pr_err("Error setting MTP\n");
1850                         return ERR_PTR(ret);
1851                 }
1852         } else
1853                 fi_mtp->dev = _mtp_dev;
1854
1855         config_group_init_type_name(&fi_mtp->func_inst.group,
1856                                         "", &mtp_func_type);
1857
1858         mutex_init(&fi_mtp->dev->read_mutex);
1859
1860         return  &fi_mtp->func_inst;
1861 }
1862 EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
1863
1864 static struct usb_function_instance *mtp_alloc_inst(void)
1865 {
1866                 return alloc_inst_mtp_ptp(true);
1867 }
1868
/* usb_function->setup callback: forward control requests to the MTP handler */
static int mtp_ctrlreq_configfs(struct usb_function *f,
				const struct usb_ctrlrequest *ctrl)
{
	return mtp_ctrlrequest(f->config->cdev, ctrl);
}
1874
/* usb_function->free_func callback; nothing to do, see comment below */
static void mtp_free(struct usb_function *f)
{
	/*NO-OP: no function specific resource allocation in mtp_alloc*/
}
1879
1880 struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
1881                                         bool mtp_config)
1882 {
1883         struct mtp_instance *fi_mtp = to_fi_mtp(fi);
1884         struct mtp_dev *dev;
1885
1886         /*
1887          * PTP piggybacks on MTP function so make sure we have
1888          * created MTP function before we associate this PTP
1889          * function with a gadget configuration.
1890          */
1891         if (fi_mtp->dev == NULL) {
1892                 pr_err("Error: Create MTP function before linking"
1893                                 " PTP function with a gadget configuration\n");
1894                 pr_err("\t1: Delete existing PTP function if any\n");
1895                 pr_err("\t2: Create MTP function\n");
1896                 pr_err("\t3: Create and symlink PTP function"
1897                                 " with a gadget configuration\n");
1898                 return ERR_PTR(-EINVAL); /* Invalid Configuration */
1899         }
1900
1901         dev = fi_mtp->dev;
1902         dev->function.name = DRIVER_NAME;
1903         dev->function.strings = mtp_strings;
1904         if (mtp_config) {
1905                 dev->function.fs_descriptors = fs_mtp_descs;
1906                 dev->function.hs_descriptors = hs_mtp_descs;
1907                 dev->function.ss_descriptors = ss_mtp_descs;
1908         } else {
1909                 dev->function.fs_descriptors = fs_ptp_descs;
1910                 dev->function.hs_descriptors = hs_ptp_descs;
1911                 dev->function.ss_descriptors = ss_ptp_descs;
1912         }
1913         dev->function.bind = mtp_function_bind;
1914         dev->function.unbind = mtp_function_unbind;
1915         dev->function.set_alt = mtp_function_set_alt;
1916         dev->function.disable = mtp_function_disable;
1917         dev->function.setup = mtp_ctrlreq_configfs;
1918         dev->function.free_func = mtp_free;
1919         dev->is_ptp = !mtp_config;
1920
1921         return &dev->function;
1922 }
1923 EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
1924
/* configfs hook: allocate the MTP flavour of the shared function */
static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
{
	return function_alloc_mtp_ptp(fi, true);
}
1929
/* register the "mtp" function with the configfs gadget framework */
DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
MODULE_LICENSE("GPL");