treewide: Fix code issues detected using GCC 8
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / media / platform / msm / camera_v2 / msm.c
1 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 #include <linux/of.h>
14 #include <linux/module.h>
15 #include <linux/workqueue.h>
16 #include <linux/delay.h>
17 #include <linux/types.h>
18 #include <linux/list.h>
19 #include <linux/ioctl.h>
20 #include <linux/spinlock.h>
21 #include <linux/proc_fs.h>
22 #include <linux/atomic.h>
23 #include <linux/videodev2.h>
24 #include <linux/msm_ion.h>
25 #include <linux/iommu.h>
26 #include <linux/platform_device.h>
27 #include <linux/debugfs.h>
28 #include <media/v4l2-fh.h>
29 #include "msm.h"
30 #include "msm_vb2.h"
31 #include "msm_sd.h"
32 #include "cam_hw_ops.h"
33 #include <media/msmb_generic_buf_mgr.h>
34
35 static struct v4l2_device *msm_v4l2_dev;
36 static struct list_head    ordered_sd_list;
37 static struct mutex        ordered_sd_mtx;
38 static struct mutex        v4l2_event_mtx;
39
40 static atomic_t qos_add_request_done = ATOMIC_INIT(0);
41 static struct pm_qos_request msm_v4l2_pm_qos_request;
42
43 static struct msm_queue_head *msm_session_q;
44
45 /* This variable represents the daemon status
46  * true = daemon present (default state)
47  * false = daemon is NOT present
48  */
49 bool is_daemon_status = true;
50
51 /* config node event queue */
52 static struct v4l2_fh  *msm_eventq;
53 spinlock_t msm_eventq_lock;
54
55 static struct pid *msm_pid;
56 spinlock_t msm_pid_lock;
57
58 static uint32_t gpu_limit;
59
60 /*
61  * It takes 20 bytes + NULL character to write the
62  * largest decimal value of a uint64_t
63  */
64 #define LOGSYNC_PACKET_SIZE 21
65
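/*
 * msm_dequeue - unlink and return the first @type node on @queue, or
 * NULL if the queue is empty; the queue spinlock is held while the
 * entry is removed and the queue length is updated.
 */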
66 #define msm_dequeue(queue, type, member) ({                             \
67         unsigned long flags;                                    \
68         struct msm_queue_head *__q = (queue);                   \
69         type *node = 0;                         \
70         spin_lock_irqsave(&__q->lock, flags);                   \
71         if (!list_empty(&__q->list)) {                          \
72                 __q->len--;                                     \
73                 node = list_first_entry(&__q->list,             \
74                                 type, member);  \
75                 if (node)  \
76                         list_del_init(&node->member);                   \
77         }                                                       \
78         spin_unlock_irqrestore(&__q->lock, flags);      \
79         node;                                                   \
80 })
81
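/*
 * msm_delete_sd_entry - find the @type node whose ->sd field matches
 * @q_node, unlink it from @queue and free it with kzfree(), all under
 * the queue spinlock.
 */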
82 #define msm_delete_sd_entry(queue, type, member, q_node) ({             \
83         unsigned long flags;                                    \
84         struct msm_queue_head *__q = (queue);                   \
85         type *node = 0;                         \
86         spin_lock_irqsave(&__q->lock, flags);                   \
87         if (!list_empty(&__q->list)) {                          \
88                 list_for_each_entry(node, &__q->list, member)   \
89                 if (node->sd == q_node) {                               \
90                         __q->len--;                             \
91                         list_del_init(&node->member);           \
92                         kzfree(node);                           \
93                         break;                                  \
94                 }                                               \
95         }                                                       \
96         spin_unlock_irqrestore(&__q->lock, flags);              \
97 })
98
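/*
 * msm_delete_entry - unlink and kzfree() the node that equals @q_node
 * on @queue, under the queue spinlock.
 */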
99 #define msm_delete_entry(queue, type, member, q_node) ({                \
100         unsigned long flags;                                    \
101         struct msm_queue_head *__q = (queue);                   \
102         type *node = 0;                         \
103         spin_lock_irqsave(&__q->lock, flags);                   \
104         if (!list_empty(&__q->list)) {                          \
105                 list_for_each_entry(node, &__q->list, member)   \
106                 if (node == q_node) {                           \
107                         __q->len--;                             \
108                         list_del_init(&node->member);           \
109                         kzfree(node);                           \
110                         break;                                  \
111                 }                                               \
112         }                                                       \
113         spin_unlock_irqrestore(&__q->lock, flags);              \
114 })
115
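/*
 * msm_queue_drain - unlink and kzfree() every node remaining on
 * @queue, under the queue spinlock.
 */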
116 #define msm_queue_drain(queue, type, member) do {                       \
117         unsigned long flags;                                    \
118         struct msm_queue_head *__q = (queue);                   \
119         type *node;                             \
120         spin_lock_irqsave(&__q->lock, flags);                   \
121         while (!list_empty(&__q->list)) {                       \
122                 __q->len--;                                     \
123                 node = list_first_entry(&__q->list,             \
124                         type, member);          \
125                 if (node) {                                     \
126                         list_del_init(&node->member);           \
127                         kzfree(node);                           \
128                 }                                               \
130         }       \
131         spin_unlock_irqrestore(&__q->lock, flags);              \
132 } while (0)
133
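/*
 * msm_queue_traverse_action - call @func(node, data) for every node on
 * @queue while holding the queue spinlock.
 */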
134 typedef int (*msm_queue_func)(void *d1, void *d2);
135 #define msm_queue_traverse_action(queue, type, member, func, data) do {\
136         unsigned long flags;                                    \
137         struct msm_queue_head *__q = (queue);                   \
138         type *node = 0; \
139         msm_queue_func __f = (func); \
140         spin_lock_irqsave(&__q->lock, flags);                   \
141         if (!list_empty(&__q->list)) { \
142                 list_for_each_entry(node, &__q->list, member) \
143                 if (node && __f) { \
144                         __f(node, data); \
145                 } \
146         } \
147         spin_unlock_irqrestore(&__q->lock, flags);                      \
148 } while (0)
149
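/*
 * msm_queue_find - return the first node on @queue for which
 * @func(node, data) returns non-zero, or NULL if nothing matches.
 */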
150 typedef int (*msm_queue_find_func)(void *d1, void *d2);
151 #define msm_queue_find(queue, type, member, func, data) ({\
152         unsigned long flags;                                    \
153         struct msm_queue_head *__q = (queue);                   \
154         type *node = NULL; \
155         typeof(node) __ret = NULL; \
156         msm_queue_find_func __f = (func); \
157         spin_lock_irqsave(&__q->lock, flags);                   \
158         if (!list_empty(&__q->list)) { \
159                 list_for_each_entry(node, &__q->list, member) \
160                 if ((__f) && __f(node, data)) { \
161                         __ret = node; \
162                         break; \
163                 } \
164         } \
165         spin_unlock_irqrestore(&__q->lock, flags); \
166         __ret; \
167 })
168
169 static void msm_init_queue(struct msm_queue_head *qhead)
170 {
171         BUG_ON(!qhead);
172
173         INIT_LIST_HEAD(&qhead->list);
174         spin_lock_init(&qhead->lock);
175         qhead->len = 0;
176         qhead->max = 0;
177 }
178
179 static void msm_enqueue(struct msm_queue_head *qhead,
180                 struct list_head *entry)
181 {
182         unsigned long flags;
183         spin_lock_irqsave(&qhead->lock, flags);
184         qhead->len++;
185         if (qhead->len > qhead->max)
186                 qhead->max = qhead->len;
187         list_add_tail(entry, &qhead->list);
188         spin_unlock_irqrestore(&qhead->lock, flags);
189 }
190
191 void msm_cam_copy_v4l2_subdev_fops(struct v4l2_file_operations *d1)
192 {
193         *d1 = v4l2_subdev_fops;
194 }
195 EXPORT_SYMBOL(msm_cam_copy_v4l2_subdev_fops);
196
197 static const struct v4l2_file_operations *msm_cam_get_v4l2_subdev_fops_ptr(
198         void)
199 {
200         return &v4l2_subdev_fops;
201 }
202
203 /* index = session id */
204 static inline int __msm_queue_find_session(void *d1, void *d2)
205 {
206         struct msm_session *session = d1;
207         return (session->session_id == *(unsigned int *)d2) ? 1 : 0;
208 }
209
210 static inline int __msm_queue_find_stream(void *d1, void *d2)
211 {
212         struct msm_stream *stream = d1;
213         return (stream->stream_id == *(unsigned int *)d2) ? 1 : 0;
214 }
215
216 static inline int __msm_queue_find_command_ack_q(void *d1, void *d2)
217 {
218         struct msm_command_ack *ack = d1;
219         return (ack->stream_id == *(unsigned int *)d2) ? 1 : 0;
220 }
221
222 static inline void msm_pm_qos_add_request(void)
223 {
224         pr_info("%s: add request", __func__);
225         if (atomic_cmpxchg(&qos_add_request_done, 0, 1))
226                 return;
227         pm_qos_add_request(&msm_v4l2_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
228         PM_QOS_DEFAULT_VALUE);
229 }
230
231 static void msm_pm_qos_remove_request(void)
232 {
233         pr_info("%s: remove request", __func__);
234         pm_qos_remove_request(&msm_v4l2_pm_qos_request);
235 }
236
237 void msm_pm_qos_update_request(int val)
238 {
239         pr_info("%s: update request %d", __func__, val);
240         msm_pm_qos_add_request();
241         pm_qos_update_request(&msm_v4l2_pm_qos_request, val);
242 }
243
244 struct msm_session *msm_session_find(unsigned int session_id)
245 {
246         struct msm_session *session;
247         session = msm_queue_find(msm_session_q, struct msm_session,
248                 list, __msm_queue_find_session, &session_id);
249         if (WARN_ON(!session))
250                 return NULL;
251         return session;
252 }
253 EXPORT_SYMBOL(msm_session_find);
254
255 int msm_create_stream(unsigned int session_id,
256         unsigned int stream_id, struct vb2_queue *q)
257 {
258         struct msm_session *session;
259         struct msm_stream  *stream;
260
261         session = msm_queue_find(msm_session_q, struct msm_session,
262                 list, __msm_queue_find_session, &session_id);
263         if (!session)
264                 return -EINVAL;
265
266         stream = kzalloc(sizeof(*stream), GFP_KERNEL);
267         if (!stream)
268                 return -ENOMEM;
269
270         stream->stream_id = stream_id;
271         stream->vb2_q = q;
272         spin_lock_init(&stream->stream_lock);
273         msm_enqueue(&session->stream_q, &stream->list);
274         session->stream_q.len++;
275
276         INIT_LIST_HEAD(&stream->queued_list);
277
278         return 0;
279 }
280 EXPORT_SYMBOL(msm_create_stream);
281
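/*
 * Unlink and free the stream from its session. The vb2 stream state is
 * checked under the stream rwlock and the deletion is retried a few
 * times before giving up.
 */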
282 void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
283 {
284         struct msm_session *session = NULL;
285         struct msm_stream  *stream = NULL;
286         unsigned long flags;
287         int try_count = 0;
288
289         session = msm_queue_find(msm_session_q, struct msm_session,
290                 list, __msm_queue_find_session, &session_id);
291
292         if (!session)
293                 return;
294
295         while (1) {
296                 unsigned long wl_flags;
297
298                 if (try_count > 5) {
299                         pr_err("%s : not able to delete stream, Line %d\n",
300                                 __func__, __LINE__);
301                         break;
302                 }
303
304                 write_lock_irqsave(&session->stream_rwlock, wl_flags);
305                 try_count++;
306                 stream = msm_queue_find(&session->stream_q, struct msm_stream,
307                         list, __msm_queue_find_stream, &stream_id);
308
309                 if (!stream) {
310                         write_unlock_irqrestore(&session->stream_rwlock,
311                                 wl_flags);
312                         return;
313                 }
314
315                 if (msm_vb2_get_stream_state(stream) != 1) {
316                         write_unlock_irqrestore(&session->stream_rwlock,
317                                 wl_flags);
318                         continue;
319                 }
320
321                 spin_lock_irqsave(&(session->stream_q.lock), flags);
322                 list_del_init(&stream->list);
323                 session->stream_q.len--;
324                 kfree(stream);
325                 stream = NULL;
326                 spin_unlock_irqrestore(&(session->stream_q.lock), flags);
327                 write_unlock_irqrestore(&session->stream_rwlock, wl_flags);
328                 break;
329         }
330
331 }
332 EXPORT_SYMBOL(msm_delete_stream);
333
334 static void msm_sd_unregister_subdev(struct video_device *vdev)
335 {
336         struct v4l2_subdev *sd = video_get_drvdata(vdev);
337         sd->devnode = NULL;
338         kzfree(vdev);
339 }
340
341 static inline int __msm_sd_register_subdev(struct v4l2_subdev *sd)
342 {
343         int rc = 0;
344         struct video_device *vdev;
345
346         if (!msm_v4l2_dev || !sd || !sd->name[0])
347                 return -EINVAL;
348
349         rc = v4l2_device_register_subdev(msm_v4l2_dev, sd);
350         if (rc < 0)
351                 return rc;
352
353         /* Register a device node for every subdev marked with the
354          * V4L2_SUBDEV_FL_HAS_DEVNODE flag.
355          */
356         if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
357                 return rc;
358
359         vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
360         if (!vdev) {
361                 rc = -ENOMEM;
362                 goto clean_up;
363         }
364
365         video_set_drvdata(vdev, sd);
366         strlcpy(vdev->name, sd->name, sizeof(vdev->name));
367         vdev->v4l2_dev = msm_v4l2_dev;
368         vdev->fops = msm_cam_get_v4l2_subdev_fops_ptr();
369         vdev->release = msm_sd_unregister_subdev;
370         rc = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
371                   sd->owner);
372         if (rc < 0) {
373                 kzfree(vdev);
374                 goto clean_up;
375         }
376
377 #if defined(CONFIG_MEDIA_CONTROLLER)
378         sd->entity.info.dev.major = VIDEO_MAJOR;
379         sd->entity.info.dev.minor = vdev->minor;
380         sd->entity.name = video_device_node_name(vdev);
381 #endif
382         sd->devnode = vdev;
383         return 0;
384
385 clean_up:
386         if (sd->devnode)
387                 video_unregister_device(sd->devnode);
388         return rc;
389 }
390
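/*
 * Insert the subdev into @sd_list keeping the list sorted by
 * close_seq, so that subdevs are shut down in a fixed order on close.
 */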
391 static void msm_add_sd_in_position(struct msm_sd_subdev *msm_subdev,
392         struct list_head *sd_list)
393 {
394         struct msm_sd_subdev *temp_sd;
395
396         list_for_each_entry(temp_sd, sd_list, list) {
397                 if (temp_sd == msm_subdev) {
398                         pr_err("%s : Failed to add same sd, Line %d\n",
399                                 __func__, __LINE__);
400                         return;
401                 }
402                 if (msm_subdev->close_seq < temp_sd->close_seq) {
403                         list_add_tail(&msm_subdev->list, &temp_sd->list);
404                         return;
405                 }
406         }
407         list_add_tail(&msm_subdev->list, sd_list);
408 }
409
410 int msm_sd_register(struct msm_sd_subdev *msm_subdev)
411 {
412         if (WARN_ON(!msm_subdev))
413                 return -EINVAL;
414
415         if (WARN_ON(!msm_v4l2_dev) || WARN_ON(!msm_v4l2_dev->dev))
416                 return -EIO;
417
418         mutex_lock(&ordered_sd_mtx);
419         msm_add_sd_in_position(msm_subdev, &ordered_sd_list);
420         mutex_unlock(&ordered_sd_mtx);
421         return __msm_sd_register_subdev(&msm_subdev->sd);
422 }
423 EXPORT_SYMBOL(msm_sd_register);
424
425 int msm_sd_unregister(struct msm_sd_subdev *msm_subdev)
426 {
427         if (WARN_ON(!msm_subdev))
428                 return -EINVAL;
429
430         v4l2_device_unregister_subdev(&msm_subdev->sd);
431         return 0;
432 }
433 EXPORT_SYMBOL(msm_sd_unregister);
434
435 static struct v4l2_subdev *msm_sd_find(const char *name)
436 {
437         unsigned long flags;
438         struct v4l2_subdev *subdev = NULL;
439         struct v4l2_subdev *subdev_out = NULL;
440
441         spin_lock_irqsave(&msm_v4l2_dev->lock, flags);
442         if (!list_empty(&msm_v4l2_dev->subdevs)) {
443                 list_for_each_entry(subdev, &msm_v4l2_dev->subdevs, list)
444                         if (!strcmp(name, subdev->name)) {
445                                 subdev_out = subdev;
446                                 break;
447                         }
448         }
449         spin_unlock_irqrestore(&msm_v4l2_dev->lock, flags);
450
451         return subdev_out;
452 }
453
454 int msm_create_session(unsigned int session_id, struct video_device *vdev)
455 {
456         struct msm_session *session = NULL;
457
458         if (!msm_session_q) {
459                 pr_err("%s : session queue not available Line %d\n",
460                                 __func__, __LINE__);
461                 return -ENODEV;
462         }
463
464         session = msm_queue_find(msm_session_q, struct msm_session,
465                 list, __msm_queue_find_session, &session_id);
466         if (session) {
467                 pr_err("%s: Session already exists, session_id=%d\n",
468                                 __func__, session_id);
469                 return -EINVAL;
470         }
471
472         session = kzalloc(sizeof(*session), GFP_KERNEL);
473         if (!session) {
474                 pr_err("%s : Memory not available Line %d\n",
475                                 __func__, __LINE__);
476                 return -ENOMEM;
477         }
478
479         session->session_id = session_id;
480         session->event_q.vdev = vdev;
481         msm_init_queue(&session->command_ack_q);
482         msm_init_queue(&session->stream_q);
483         msm_enqueue(msm_session_q, &session->list);
484         mutex_init(&session->lock);
485         mutex_init(&session->lock_q);
486         mutex_init(&session->close_lock);
487         rwlock_init(&session->stream_rwlock);
488
489         if (gpu_limit) {
490                 session->sysfs_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);
491                 if (session->sysfs_pwr_limit)
492                         kgsl_pwr_limits_set_freq(session->sysfs_pwr_limit,
493                                 gpu_limit);
494         }
495
496         return 0;
497 }
498 EXPORT_SYMBOL(msm_create_session);
499
500 int msm_create_command_ack_q(unsigned int session_id, unsigned int stream_id)
501 {
502         struct msm_session *session;
503         struct msm_command_ack *cmd_ack;
504
505         if (!msm_session_q) {
506                 pr_err("%s : Session queue not available Line %d\n",
507                                 __func__, __LINE__);
508                 return -ENODEV;
509         }
510
511         session = msm_queue_find(msm_session_q, struct msm_session,
512                 list, __msm_queue_find_session, &session_id);
513         if (!session) {
514                 pr_err("%s : Session not found Line %d\n",
515                                 __func__, __LINE__);
516                 return -EINVAL;
517         }
518         mutex_lock(&session->lock);
519         cmd_ack = kzalloc(sizeof(*cmd_ack), GFP_KERNEL);
520         if (!cmd_ack) {
521                 mutex_unlock(&session->lock);
522                 pr_err("%s : memory not available Line %d\n",
523                                 __func__, __LINE__);
524                 return -ENOMEM;
525         }
526
527         msm_init_queue(&cmd_ack->command_q);
528         INIT_LIST_HEAD(&cmd_ack->list);
529         init_completion(&cmd_ack->wait_complete);
530         cmd_ack->stream_id = stream_id;
531
532         msm_enqueue(&session->command_ack_q, &cmd_ack->list);
533         session->command_ack_q.len++;
534         mutex_unlock(&session->lock);
535         return 0;
536 }
537 EXPORT_SYMBOL(msm_create_command_ack_q);
538
539 void msm_delete_command_ack_q(unsigned int session_id, unsigned int stream_id)
540 {
541         struct msm_session *session;
542         struct msm_command_ack *cmd_ack;
543         unsigned long flags;
544
545         session = msm_queue_find(msm_session_q, struct msm_session,
546                 list, __msm_queue_find_session, &session_id);
547         if (!session)
548                 return;
549         mutex_lock(&session->lock);
550
551         cmd_ack = msm_queue_find(&session->command_ack_q,
552                 struct msm_command_ack, list, __msm_queue_find_command_ack_q,
553                 &stream_id);
554         if (!cmd_ack) {
555                 mutex_unlock(&session->lock);
556                 return;
557         }
558
559         msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
560
561         spin_lock_irqsave(&(session->command_ack_q.lock), flags);
562         list_del_init(&cmd_ack->list);
563         kzfree(cmd_ack);
564         session->command_ack_q.len--;
565         spin_unlock_irqrestore(&(session->command_ack_q.lock), flags);
566         mutex_unlock(&session->lock);
567 }
568 EXPORT_SYMBOL(msm_delete_command_ack_q);
569
570 static inline int __msm_sd_close_subdevs(struct msm_sd_subdev *msm_sd,
571         struct msm_sd_close_ioctl *sd_close)
572 {
573         struct v4l2_subdev *sd;
574         sd = &msm_sd->sd;
575         pr_debug("%s: Shutting down subdev %s\n", __func__, sd->name);
576
577         v4l2_subdev_call(sd, core, ioctl, MSM_SD_SHUTDOWN, sd_close);
578         v4l2_subdev_call(sd, core, s_power, 0);
579
580         return 0;
581 }
582
583 static inline int __msm_sd_notify_freeze_subdevs(struct msm_sd_subdev *msm_sd,
584         int enable)
585 {
586         struct v4l2_subdev *sd;
587         sd = &msm_sd->sd;
588
589         if (enable)
590                 v4l2_subdev_call(sd, core, ioctl, MSM_SD_NOTIFY_FREEZE, NULL);
591         else
592                 v4l2_subdev_call(sd, core, ioctl, MSM_SD_UNNOTIFY_FREEZE, NULL);
593
594         return 0;
595 }
596
597 static inline int __msm_destroy_session_streams(void *d1, void *d2)
598 {
599         struct msm_stream *stream = d1;
600         unsigned long flags;
601
602         pr_err("%s: Error: Destroyed list is not empty\n", __func__);
603         spin_lock_irqsave(&stream->stream_lock, flags);
604         INIT_LIST_HEAD(&stream->queued_list);
605         spin_unlock_irqrestore(&stream->stream_lock, flags);
606         return 0;
607 }
608
609 static void msm_destroy_session_streams(struct msm_session *session)
610 {
611
612         if (!session)
613                 return;
614
615         msm_queue_traverse_action(&session->stream_q, struct msm_stream, list,
616                 __msm_destroy_session_streams, NULL);
617
618         msm_queue_drain(&session->stream_q, struct msm_stream, list);
619 }
620
621 static inline int __msm_remove_session_cmd_ack_q(void *d1, void *d2)
622 {
623         struct msm_command_ack *cmd_ack = d1;
624
625         msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
626
627         return 0;
628 }
629
630 static void msm_remove_session_cmd_ack_q(struct msm_session *session)
631 {
632         if (!session)
633                 return;
634
635         mutex_lock(&session->lock);
636         /* For error handling, all subdevs that are connected to
637          * streams need to be detached. */
638         msm_queue_traverse_action(&session->command_ack_q,
639                 struct msm_command_ack, list,
640                 __msm_remove_session_cmd_ack_q, NULL);
641
642         msm_queue_drain(&session->command_ack_q, struct msm_command_ack, list);
643
644         mutex_unlock(&session->lock);
645 }
646
647 int msm_destroy_session(unsigned int session_id)
648 {
649         struct msm_session *session;
650         struct v4l2_subdev *buf_mgr_subdev;
651         struct msm_sd_close_ioctl session_info;
652
653         session = msm_queue_find(msm_session_q, struct msm_session,
654                 list, __msm_queue_find_session, &session_id);
655         if (!session)
656                 return -EINVAL;
657
658         if (gpu_limit && session->sysfs_pwr_limit) {
659                 kgsl_pwr_limits_set_default(session->sysfs_pwr_limit);
660                 kgsl_pwr_limits_del(session->sysfs_pwr_limit);
661         }
662
663         msm_destroy_session_streams(session);
664         msm_remove_session_cmd_ack_q(session);
665         mutex_destroy(&session->lock);
666         mutex_destroy(&session->lock_q);
667         mutex_destroy(&session->close_lock);
668         msm_delete_entry(msm_session_q, struct msm_session,
669                 list, session);
670         buf_mgr_subdev = msm_sd_find("msm_buf_mngr");
671         if (buf_mgr_subdev) {
672                 session_info.session = session_id;
673                 session_info.stream = 0;
674                 v4l2_subdev_call(buf_mgr_subdev, core, ioctl,
675                         MSM_SD_SHUTDOWN, &session_info);
676         } else {
677                 pr_err("%s: Buffer manager device node is NULL\n", __func__);
678         }
679
680         return 0;
681 }
682 EXPORT_SYMBOL(msm_destroy_session);
683
684 static int __msm_close_destry_session_notify_apps(void *d1, void *d2)
685 {
686         struct v4l2_event event;
687         struct msm_v4l2_event_data *event_data =
688                 (struct msm_v4l2_event_data *)&event.u.data[0];
689         struct msm_session *session = d1;
690
691         event.type = MSM_CAMERA_V4L2_EVENT_TYPE;
692         event.id   = MSM_CAMERA_MSM_NOTIFY;
693         event_data->command = MSM_CAMERA_PRIV_SHUTDOWN;
694
695         v4l2_event_queue(session->event_q.vdev, &event);
696
697         return 0;
698 }
699
700 static int __msm_wakeup_all_cmdack_session_stream(void *d1, void *d2)
701 {
702         struct msm_stream *stream = d1;
703         struct msm_session *session = d2;
704         struct msm_command_ack *cmd_ack = NULL;
705         unsigned long spin_flags = 0;
706
707         cmd_ack = msm_queue_find(&session->command_ack_q,
708                 struct msm_command_ack, list,
709                 __msm_queue_find_command_ack_q,
710                 &stream->stream_id);
711         if (cmd_ack) {
712                 spin_lock_irqsave(&(session->command_ack_q.lock),
713                         spin_flags);
714                 complete(&cmd_ack->wait_complete);
715                 spin_unlock_irqrestore(&(session->command_ack_q.lock),
716                         spin_flags);
717         }
718         return 0;
719 }
720
721 static int __msm_close_wakeup_all_cmdack_session(void *d1, void *d2)
722 {
723         struct msm_stream  *stream = NULL;
724         struct msm_session *session = d1;
725
726         stream = msm_queue_find(&session->stream_q, struct msm_stream,
727                 list, __msm_wakeup_all_cmdack_session_stream, d1);
728         return 0;
729 }
730
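/*
 * Private ioctl handler for the msm-config node. The user-space daemon
 * uses it to queue v4l2 events to the HAL, acknowledge previously
 * posted commands, and to notify subdevs of SOF freeze/recovery and
 * fatal errors.
 */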
731 static long msm_private_ioctl(struct file *file, void *fh,
732         bool valid_prio, unsigned int cmd, void *arg)
733 {
734         int rc = 0;
735         struct msm_v4l2_event_data *event_data = arg;
736         struct v4l2_event event;
737         struct msm_session *session;
738         unsigned int session_id;
739         unsigned int stream_id;
740         unsigned long spin_flags = 0;
741         struct msm_sd_subdev *msm_sd;
742
743         if (cmd == MSM_CAM_V4L2_IOCTL_DAEMON_DISABLED) {
744                 is_daemon_status = false;
745                 return 0;
746         }
747
748         if (!event_data)
749                 return -EINVAL;
750
751         switch (cmd) {
752         case MSM_CAM_V4L2_IOCTL_NOTIFY:
753         case MSM_CAM_V4L2_IOCTL_CMD_ACK:
754         case MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG:
755         case MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR:
756                 break;
757         default:
758                 return -ENOTTY;
759         }
760
761         memset(&event, 0, sizeof(struct v4l2_event));
762         session_id = event_data->session_id;
763         stream_id = event_data->stream_id;
764
765         session = msm_queue_find(msm_session_q, struct msm_session,
766                 list, __msm_queue_find_session, &session_id);
767
768         if (!session)
769                 return -EINVAL;
770
771         switch (cmd) {
772         case MSM_CAM_V4L2_IOCTL_NOTIFY: {
773                 if (WARN_ON(!session->event_q.vdev)) {
774                         rc = -EFAULT;
775                         break;
776                 }
777                 event.type = event_data->v4l2_event_type;
778                 event.id = event_data->v4l2_event_id;
779                 memcpy(&event.u.data, event_data,
780                         sizeof(struct msm_v4l2_event_data));
781                 v4l2_event_queue(session->event_q.vdev,
782                         &event);
783         }
784                 break;
785
786         case MSM_CAM_V4L2_IOCTL_CMD_ACK: {
787                 struct msm_command_ack *cmd_ack;
788                 struct msm_command *ret_cmd;
789
790                 ret_cmd = kzalloc(sizeof(*ret_cmd), GFP_KERNEL);
791                 if (!ret_cmd) {
792                         rc = -ENOMEM;
793                         break;
794                 }
795
796                 cmd_ack = msm_queue_find(&session->command_ack_q,
797                         struct msm_command_ack, list,
798                         __msm_queue_find_command_ack_q,
799                         &stream_id);
800                 if (WARN_ON(!cmd_ack)) {
801                         kzfree(ret_cmd);
802                         rc = -EFAULT;
803                         break;
804                 }
805
806                 spin_lock_irqsave(&(session->command_ack_q.lock),
807                    spin_flags);
808                 event.type = event_data->v4l2_event_type;
809                 event.id = event_data->v4l2_event_id;
810                 memcpy(&event.u.data, event_data,
811                         sizeof(struct msm_v4l2_event_data));
812                 memcpy(&ret_cmd->event, &event, sizeof(struct v4l2_event));
813                 msm_enqueue(&cmd_ack->command_q, &ret_cmd->list);
814                 complete(&cmd_ack->wait_complete);
815                 spin_unlock_irqrestore(&(session->command_ack_q.lock),
816                    spin_flags);
817         }
818                 break;
819
820         case MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG: {
821                 if (event_data->status) {
822                         pr_err("%s:Notifying subdevs about potential sof freeze\n",
823                                 __func__);
824                 } else {
825                         pr_err("%s:Notifying subdevs about sof recover\n",
826                                 __func__);
827                 }
828
829                 mutex_lock(&ordered_sd_mtx);
830                 if (!list_empty(&msm_v4l2_dev->subdevs)) {
831                         list_for_each_entry(msm_sd, &ordered_sd_list, list)
832                                 __msm_sd_notify_freeze_subdevs(msm_sd,
833                                         event_data->status);
834                 }
835                 mutex_unlock(&ordered_sd_mtx);
836         }
837                 break;
838
839         case MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR:
840                 /* send v4l2_event to HAL next */
841                 msm_queue_traverse_action(msm_session_q,
842                         struct msm_session, list,
843                         __msm_close_destry_session_notify_apps, NULL);
844                 break;
845
846         default:
847                 rc = -ENOTTY;
848                 break;
849         }
850
851         return rc;
852 }
853
854 static int msm_unsubscribe_event(struct v4l2_fh *fh,
855         const struct v4l2_event_subscription *sub)
856 {
857         int rc;
858
859         mutex_lock(&v4l2_event_mtx);
860         rc = v4l2_event_unsubscribe(fh, sub);
861         mutex_unlock(&v4l2_event_mtx);
862
863         return rc;
864 }
865
866 static int msm_subscribe_event(struct v4l2_fh *fh,
867         const struct v4l2_event_subscription *sub)
868 {
869         int rc;
870
871         mutex_lock(&v4l2_event_mtx);
872         rc = v4l2_event_subscribe(fh, sub, 5, NULL);
873         mutex_unlock(&v4l2_event_mtx);
874
875         return rc;
876 }
877
878 static const struct v4l2_ioctl_ops g_msm_ioctl_ops = {
879         .vidioc_subscribe_event = msm_subscribe_event,
880         .vidioc_unsubscribe_event = msm_unsubscribe_event,
881         .vidioc_default = msm_private_ioctl,
882 };
883
884 static unsigned int msm_poll(struct file *f,
885         struct poll_table_struct *pll_table)
886 {
887         int rc = 0;
888         struct v4l2_fh *eventq = f->private_data;
889
890         BUG_ON(!eventq);
891
892         poll_wait(f, &eventq->wait, pll_table);
893
894         if (v4l2_event_pending(eventq))
895                 rc = POLLIN | POLLRDNORM;
896
897         return rc;
898 }
899
900 static void msm_print_event_error(struct v4l2_event *event)
901 {
902         struct msm_v4l2_event_data *event_data =
903                 (struct msm_v4l2_event_data *)&event->u.data[0];
904
905         pr_err("Evt_type=%x Evt_id=%d Evt_cmd=%x\n", event->type,
906                 event->id, event_data->command);
907         pr_err("Evt_session_id=%d Evt_stream_id=%d Evt_arg=%d\n",
908                 event_data->session_id, event_data->stream_id,
909                 event_data->arg_value);
910 }
911
912 /* Something is seriously wrong if msm_close is triggered:
913  *   !!! the user-space imaging server has shut down !!!
914  */
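/*
 * Queue @event to the imaging daemon's config node and, if @timeout
 * (in ms) is non-negative, wait for the matching command ack on the
 * stream's command queue.
 */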
915 int msm_post_event(struct v4l2_event *event, int timeout)
916 {
917         int rc = 0;
918         struct video_device *vdev;
919         struct msm_session *session;
920         struct msm_v4l2_event_data *event_data =
921                 (struct msm_v4l2_event_data *)&event->u.data[0];
922         struct msm_command_ack *cmd_ack;
923         struct msm_command *cmd;
924         int session_id, stream_id;
925         unsigned long flags = 0;
926
927         session_id = event_data->session_id;
928         stream_id = event_data->stream_id;
929
930         spin_lock_irqsave(&msm_eventq_lock, flags);
931         if (!msm_eventq) {
932                 spin_unlock_irqrestore(&msm_eventq_lock, flags);
933                 pr_err("%s : msm event queue not available Line %d\n",
934                                 __func__, __LINE__);
935                 return -ENODEV;
936         }
937         spin_unlock_irqrestore(&msm_eventq_lock, flags);
938
939         vdev = msm_eventq->vdev;
940
941         /* send to imaging server and wait for ACK */
942         session = msm_queue_find(msm_session_q, struct msm_session,
943                 list, __msm_queue_find_session, &session_id);
944         if (WARN_ON(!session)) {
945                 pr_err("%s : session not found Line %d\n",
946                                 __func__, __LINE__);
947                 return -EIO;
948         }
949         mutex_lock(&session->lock);
950         cmd_ack = msm_queue_find(&session->command_ack_q,
951                 struct msm_command_ack, list,
952                 __msm_queue_find_command_ack_q, &stream_id);
953         if (WARN_ON(!cmd_ack)) {
954                 mutex_unlock(&session->lock);
955                 pr_err("%s : cmd_ack not found Line %d\n",
956                                 __func__, __LINE__);
957                 return -EIO;
958         }
959
960         /* re-init wait_complete */
961         reinit_completion(&cmd_ack->wait_complete);
962
963         v4l2_event_queue(vdev, event);
964
965         if (timeout < 0) {
966                 mutex_unlock(&session->lock);
967                 pr_debug("%s : negative timeout, not waiting for ack Line %d\n",
968                                 __func__, __LINE__);
969                 return rc;
970         }
971
972         /* should wait on session based condition */
973         rc = wait_for_completion_timeout(&cmd_ack->wait_complete,
974                         msecs_to_jiffies(timeout));
975
976
977         if (list_empty_careful(&cmd_ack->command_q.list)) {
978                 if (!rc) {
979                         pr_err("%s: Timed out\n", __func__);
980                         msm_print_event_error(event);
981                         mutex_unlock(&session->lock);
982                         return -ETIMEDOUT;
983                 } else {
984                         pr_err("%s: Error: No timeout but list empty!\n",
985                                         __func__);
986                         msm_print_event_error(event);
987                         mutex_unlock(&session->lock);
988                         return -EINVAL;
989                 }
990         }
991
992         cmd = msm_dequeue(&cmd_ack->command_q,
993                 struct msm_command, list);
994         if (!cmd) {
995                 mutex_unlock(&session->lock);
996                 pr_err("%s : cmd dequeue failed Line %d\n",
997                                 __func__, __LINE__);
998                 return -EINVAL;
999         }
1000
1001         event_data = (struct msm_v4l2_event_data *)cmd->event.u.data;
1002
1003         /* compare cmd_ret and event */
1004         if (WARN_ON(event->type != cmd->event.type) ||
1005                         WARN_ON(event->id != cmd->event.id)) {
1006                 pr_err("%s : Either event type or id did not match Line %d\n",
1007                                 __func__, __LINE__);
1008                 pr_err("%s : event->type %d event->id %d\n", __func__,
1009                                 event->type, event->id);
1010                 pr_err("%s : cmd->event.type %d cmd->event.id %d\n", __func__,
1011                                 cmd->event.type, cmd->event.id);
1012                 rc = -EINVAL;
1013         }
1014
1015         *event = cmd->event;
1016
1017         kzfree(cmd);
1018         mutex_unlock(&session->lock);
1019         return rc;
1020 }
1021 EXPORT_SYMBOL(msm_post_event);
1022
1023 static int msm_close(struct file *filep)
1024 {
1025         int rc = 0;
1026         unsigned long flags;
1027         struct msm_video_device *pvdev = video_drvdata(filep);
1028         struct msm_sd_close_ioctl sd_close;
1029         struct msm_sd_subdev *msm_sd;
1030
1031         /* stop all hardware blocks immediately */
1032         mutex_lock(&ordered_sd_mtx);
1033         if (!list_empty(&msm_v4l2_dev->subdevs))
1034                 list_for_each_entry(msm_sd, &ordered_sd_list, list)
1035                         __msm_sd_close_subdevs(msm_sd, &sd_close);
1036         mutex_unlock(&ordered_sd_mtx);
1037
1038         /* remove msm_v4l2_pm_qos_request */
1039         msm_pm_qos_remove_request();
1040
1041         /* send v4l2_event to HAL next */
1042         msm_queue_traverse_action(msm_session_q, struct msm_session, list,
1043                 __msm_close_destry_session_notify_apps, NULL);
1044
1045         msm_queue_traverse_action(msm_session_q, struct msm_session, list,
1046                 __msm_close_wakeup_all_cmdack_session, NULL);
1047
1048         spin_lock_irqsave(&msm_eventq_lock, flags);
1049         msm_eventq = NULL;
1050         spin_unlock_irqrestore(&msm_eventq_lock, flags);
1051         v4l2_fh_release(filep);
1052
1053         spin_lock_irqsave(&msm_pid_lock, flags);
1054         put_pid(msm_pid);
1055         msm_pid = NULL;
1056         spin_unlock_irqrestore(&msm_pid_lock, flags);
1057
1058         atomic_set(&pvdev->opened, 0);
1059
1060         return rc;
1061 }
1062
1063 static inline void msm_list_switch(struct list_head *l1,
1064         struct list_head *l2)
1065 {
1066         l1->next = l2->next;
1067         l2->prev = l1->prev;
1068         l1->prev->next = l2;
1069         l2->next->prev = l1;
1070         l1->prev = l2;
1071         l2->next = l1;
1072 }
1073
1074 static int msm_open(struct file *filep)
1075 {
1076         int rc;
1077         unsigned long flags;
1078         struct msm_video_device *pvdev = video_drvdata(filep);
1079         BUG_ON(!pvdev);
1080
1081         /* !!! only ONE open is allowed !!! */
1082         if (atomic_cmpxchg(&pvdev->opened, 0, 1))
1083                 return -EBUSY;
1084
1085         spin_lock_irqsave(&msm_pid_lock, flags);
1086         msm_pid = get_pid(task_pid(current));
1087         spin_unlock_irqrestore(&msm_pid_lock, flags);
1088
1089         /* create event queue */
1090         rc = v4l2_fh_open(filep);
1091         if (rc  < 0)
1092                 return rc;
1093
1094         spin_lock_irqsave(&msm_eventq_lock, flags);
1095         msm_eventq = filep->private_data;
1096         spin_unlock_irqrestore(&msm_eventq_lock, flags);
1097
1098         /* register msm_v4l2_pm_qos_request */
1099         msm_pm_qos_add_request();
1100
1101         return rc;
1102 }
1103
1104 static struct v4l2_file_operations msm_fops = {
1105         .owner  = THIS_MODULE,
1106         .open   = msm_open,
1107         .poll   = msm_poll,
1108         .release = msm_close,
1109         .unlocked_ioctl   = video_ioctl2,
1110 #ifdef CONFIG_COMPAT
1111         .compat_ioctl32 = video_ioctl2,
1112 #endif
1113 };
1114
1115 struct msm_session *msm_get_session(unsigned int session_id)
1116 {
1117         struct msm_session *session;
1118
1119         session = msm_queue_find(msm_session_q, struct msm_session,
1120                 list, __msm_queue_find_session, &session_id);
1121         if (!session)
1122                 return ERR_PTR(-EINVAL);
1123
1124         return session;
1125 }
1126 EXPORT_SYMBOL(msm_get_session);
1127
1128
1129 struct msm_stream *msm_get_stream(struct msm_session *session,
1130         unsigned int stream_id)
1131 {
1132         struct msm_stream *stream;
1133
1134         stream = msm_queue_find(&session->stream_q, struct msm_stream,
1135                 list, __msm_queue_find_stream, &stream_id);
1136
1137         if (!stream)
1138                 return ERR_PTR(-EINVAL);
1139
1140         return stream;
1141 }
1142 EXPORT_SYMBOL(msm_get_stream);
1143
1144 struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id,
1145         unsigned int stream_id)
1146 {
1147         struct msm_session *session;
1148         struct msm_stream *stream;
1149
1150         session = msm_queue_find(msm_session_q, struct msm_session,
1151                 list, __msm_queue_find_session, &session_id);
1152         if (!session)
1153                 return NULL;
1154
1155         stream = msm_queue_find(&session->stream_q, struct msm_stream,
1156                 list, __msm_queue_find_stream, &stream_id);
1157         if (!stream)
1158                 return NULL;
1159
1160         return stream->vb2_q;
1161 }
1162 EXPORT_SYMBOL(msm_get_stream_vb2q);
1163
1164 struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q)
1165 {
1166         struct msm_session *session;
1167         struct msm_stream *stream;
1168         unsigned long flags1;
1169         unsigned long flags2;
1170         spin_lock_irqsave(&msm_session_q->lock, flags1);
1171         list_for_each_entry(session, &(msm_session_q->list), list) {
1172                 spin_lock_irqsave(&(session->stream_q.lock), flags2);
1173                 list_for_each_entry(
1174                         stream, &(session->stream_q.list), list) {
1175                         if (stream->vb2_q == q) {
1176                                 spin_unlock_irqrestore
1177                                         (&(session->stream_q.lock), flags2);
1178                                 spin_unlock_irqrestore
1179                                         (&msm_session_q->lock, flags1);
1180                                 return stream;
1181                         }
1182                 }
1183                 spin_unlock_irqrestore(&(session->stream_q.lock), flags2);
1184         }
1185         spin_unlock_irqrestore(&msm_session_q->lock, flags1);
1186         return NULL;
1187 }
1188 EXPORT_SYMBOL(msm_get_stream_from_vb2q);
1189
1190 struct msm_session *msm_get_session_from_vb2q(struct vb2_queue *q)
1191 {
1192         struct msm_session *session;
1193         struct msm_stream *stream;
1194         unsigned long flags1;
1195         unsigned long flags2;
1196
1197         spin_lock_irqsave(&msm_session_q->lock, flags1);
1198         list_for_each_entry(session, &(msm_session_q->list), list) {
1199                 spin_lock_irqsave(&(session->stream_q.lock), flags2);
1200                 list_for_each_entry(
1201                         stream, &(session->stream_q.list), list) {
1202                         if (stream->vb2_q == q) {
1203                                 spin_unlock_irqrestore
1204                                         (&(session->stream_q.lock), flags2);
1205                                 spin_unlock_irqrestore
1206                                         (&msm_session_q->lock, flags1);
1207                                 return session;
1208                         }
1209                 }
1210                 spin_unlock_irqrestore(&(session->stream_q.lock), flags2);
1211         }
1212         spin_unlock_irqrestore(&msm_session_q->lock, flags1);
1213         return NULL;
1214 }
1215 EXPORT_SYMBOL(msm_get_session_from_vb2q);
1216
1217
1218 #ifdef CONFIG_COMPAT
1219 long msm_copy_camera_private_ioctl_args(unsigned long arg,
1220         struct msm_camera_private_ioctl_arg *k_ioctl,
1221         void __user **tmp_compat_ioctl_ptr)
1222 {
1223         struct msm_camera_private_ioctl_arg up_ioctl;
1224
1225         if (WARN_ON(!arg || !k_ioctl || !tmp_compat_ioctl_ptr))
1226                 return -EIO;
1227
1228         if (copy_from_user(&up_ioctl,
1229                 (struct msm_camera_private_ioctl_arg *)arg,
1230                 sizeof(struct msm_camera_private_ioctl_arg)))
1231                 return -EFAULT;
1232
1233         k_ioctl->id = up_ioctl.id;
1234         k_ioctl->size = up_ioctl.size;
1235         k_ioctl->result = up_ioctl.result;
1236         k_ioctl->reserved = up_ioctl.reserved;
1237         *tmp_compat_ioctl_ptr = compat_ptr(up_ioctl.ioctl_ptr);
1238
1239         return 0;
1240 }
1241 EXPORT_SYMBOL(msm_copy_camera_private_ioctl_args);
1242 #endif
1243
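/*
 * v4l2_device notify callback: resolves subdev lookups by name for
 * MSM_SD_NOTIFY_GET_SD/PUT_SD and forwards vb2 queue callback requests
 * to msm_vb2_request_cb().
 */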
1244 static void msm_sd_notify(struct v4l2_subdev *sd,
1245         unsigned int notification, void *arg)
1246 {
1247         int rc = 0;
1248         struct v4l2_subdev *subdev = NULL;
1249
1250         BUG_ON(!sd);
1251         BUG_ON(!arg);
1252
1253         /* Check if subdev exists before processing*/
1254         if (!msm_sd_find(sd->name))
1255                 return;
1256
1257         switch (notification) {
1258         case MSM_SD_NOTIFY_GET_SD: {
1259                 struct msm_sd_req_sd *get_sd = arg;
1260
1261                 get_sd->subdev = msm_sd_find(get_sd->name);
1262                 /* TODO: might need to add ref count on ret_sd */
1263         }
1264                 break;
1265
1266         case MSM_SD_NOTIFY_PUT_SD: {
1267                 struct msm_sd_req_sd *put_sd = arg;
1268                 subdev = msm_sd_find(put_sd->name);
1269         }
1270                 break;
1271
1272         case MSM_SD_NOTIFY_REQ_CB: {
1273                 struct msm_sd_req_vb2_q *req_sd = arg;
1274                 rc = msm_vb2_request_cb(req_sd);
1275                 if (rc < 0)
1276                         return;
1277         }
1278                 break;
1279
1280         default:
1281                 break;
1282         }
1283 }
1284
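/*
 * debugfs write handler for the logsync node: parses a decimal
 * sequence number from user space and logs it so user-space and kernel
 * logs can be correlated.
 */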
1285 static ssize_t write_logsync(struct file *file, const char __user *buf,
1286                 size_t count, loff_t *ppos)
1287 {
1288         char lbuf[LOGSYNC_PACKET_SIZE] = {0};
1289         uint64_t seq_num = 0;
1290         int ret;
1291
1292         if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
1293                 return -EFAULT;
1294
1295         ret = sscanf(lbuf, "%llu", &seq_num);
1296         if (ret != 1)
1297                 pr_err("LOGSYNC (Kernel): Bad or malformed sequence number\n");
1298         else
1299                 pr_debug("LOGSYNC (Kernel): seq_num = %llu\n", seq_num);
1300
1301         return count;
1302 }
1303
1304
1305 static const struct file_operations logsync_fops = {
1306                 .write = write_logsync,
1307 };
1308
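/*
 * Probe: allocate and register the v4l2/media devices, create the
 * "msm-config" video node, set up the logsync debugfs file, initialize
 * the AHB clocks and read the optional qcom,gpu-limit property.
 */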
1309 static int msm_probe(struct platform_device *pdev)
1310 {
1311         struct msm_video_device *pvdev = NULL;
1312         static struct dentry *cam_debugfs_root;
1313         int rc = 0;
1314
1315         msm_v4l2_dev = kzalloc(sizeof(*msm_v4l2_dev),
1316                 GFP_KERNEL);
1317         if (WARN_ON(!msm_v4l2_dev)) {
1318                 rc = -ENOMEM;
1319                 goto probe_end;
1320         }
1321
1322         pvdev = kzalloc(sizeof(struct msm_video_device),
1323                 GFP_KERNEL);
1324         if (WARN_ON(!pvdev)) {
1325                 rc = -ENOMEM;
1326                 goto pvdev_fail;
1327         }
1328
1329         pvdev->vdev = video_device_alloc();
1330         if (WARN_ON(!pvdev->vdev)) {
1331                 rc = -ENOMEM;
1332                 goto video_fail;
1333         }
1334
1335 #if defined(CONFIG_MEDIA_CONTROLLER)
1336         msm_v4l2_dev->mdev = kzalloc(sizeof(struct media_device),
1337                 GFP_KERNEL);
1338         if (!msm_v4l2_dev->mdev) {
1339                 rc = -ENOMEM;
1340                 goto mdev_fail;
1341         }
1342         strlcpy(msm_v4l2_dev->mdev->model, MSM_CONFIGURATION_NAME,
1343                         sizeof(msm_v4l2_dev->mdev->model));
1344         msm_v4l2_dev->mdev->dev = &(pdev->dev);
1345
1346         rc = media_device_register(msm_v4l2_dev->mdev);
1347         if (WARN_ON(rc < 0))
1348                 goto media_fail;
1349
1350         if (WARN_ON((rc = media_entity_init(&pvdev->vdev->entity,
1351                         0, NULL, 0)) < 0))
1352                 goto entity_fail;
1353
1354         pvdev->vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
1355         pvdev->vdev->entity.group_id = QCAMERA_VNODE_GROUP_ID;
1356 #endif
1357
1358         msm_v4l2_dev->notify = msm_sd_notify;
1359
1360         pvdev->vdev->v4l2_dev = msm_v4l2_dev;
1361
1362         rc = v4l2_device_register(&(pdev->dev), pvdev->vdev->v4l2_dev);
1363         if (WARN_ON(rc < 0))
1364                 goto register_fail;
1365
1366         strlcpy(pvdev->vdev->name, "msm-config", sizeof(pvdev->vdev->name));
1367         pvdev->vdev->release  = video_device_release;
1368         pvdev->vdev->fops     = &msm_fops;
1369         pvdev->vdev->ioctl_ops = &g_msm_ioctl_ops;
1370         pvdev->vdev->minor     = -1;
1371         pvdev->vdev->vfl_type  = VFL_TYPE_GRABBER;
1372         rc = video_register_device(pvdev->vdev,
1373                 VFL_TYPE_GRABBER, -1);
1374         if (WARN_ON(rc < 0))
1375                 goto v4l2_fail;
1376
1377 #if defined(CONFIG_MEDIA_CONTROLLER)
1378         /* FIXME: How to get rid of this mess? */
1379         pvdev->vdev->entity.name = video_device_node_name(pvdev->vdev);
1380 #endif
1381
1382         atomic_set(&pvdev->opened, 0);
1383         video_set_drvdata(pvdev->vdev, pvdev);
1384
1385         msm_session_q = kzalloc(sizeof(*msm_session_q), GFP_KERNEL);
1386         if (WARN_ON(!msm_session_q))
1387                 goto v4l2_fail;
1388
1389         msm_init_queue(msm_session_q);
1390         spin_lock_init(&msm_eventq_lock);
1391         spin_lock_init(&msm_pid_lock);
1392         mutex_init(&ordered_sd_mtx);
1393         mutex_init(&v4l2_event_mtx);
1394         INIT_LIST_HEAD(&ordered_sd_list);
1395
1396         cam_debugfs_root = debugfs_create_dir(MSM_CAM_LOGSYNC_FILE_BASEDIR,
1397                                                 NULL);
1398         if (!cam_debugfs_root) {
1399                 pr_warn("NON-FATAL: failed to create logsync base directory\n");
1400         } else {
1401                 if (!debugfs_create_file(MSM_CAM_LOGSYNC_FILE_NAME,
1402                                          0666,
1403                                          cam_debugfs_root,
1404                                          NULL,
1405                                          &logsync_fops))
1406                         pr_warn("NON-FATAL: failed to create logsync debugfs file\n");
1407         }
1408
1409         rc = cam_ahb_clk_init(pdev);
1410         if (rc < 0) {
1411                 pr_err("%s: failed to register ahb clocks\n", __func__);
1412                 goto v4l2_fail;
1413         }
1414
1415         of_property_read_u32(pdev->dev.of_node,
1416                 "qcom,gpu-limit", &gpu_limit);
1417
1418         goto probe_end;
1419
1420 v4l2_fail:
1421         v4l2_device_unregister(pvdev->vdev->v4l2_dev);
1422 register_fail:
1423 #if defined(CONFIG_MEDIA_CONTROLLER)
1424         media_entity_cleanup(&pvdev->vdev->entity);
1425 entity_fail:
1426         media_device_unregister(msm_v4l2_dev->mdev);
1427 media_fail:
1428         kzfree(msm_v4l2_dev->mdev);
1429 mdev_fail:
1430 #endif
1431         video_device_release(pvdev->vdev);
1432 video_fail:
1433         kzfree(pvdev);
1434 pvdev_fail:
1435         kzfree(msm_v4l2_dev);
1436 probe_end:
1437         return rc;
1438 }
1439
1440 static const struct of_device_id msm_dt_match[] = {
1441         {.compatible = "qcom,msm-cam"},
1442         {}
1443 };
1444 MODULE_DEVICE_TABLE(of, msm_dt_match);
1445
1446 static struct platform_driver msm_driver = {
1447         .probe = msm_probe,
1448         .driver = {
1449                 .name = "msm",
1450                 .owner = THIS_MODULE,
1451                 .of_match_table = msm_dt_match,
1452         },
1453 };
1454
1455 static int __init msm_init(void)
1456 {
1457         return platform_driver_register(&msm_driver);
1458 }
1459
1460 static void __exit msm_exit(void)
1461 {
1462         platform_driver_unregister(&msm_driver);
1463 }
1464
1465
1466 module_init(msm_init);
1467 module_exit(msm_exit);
1468 MODULE_DESCRIPTION("MSM V4L2 Camera");
1469 MODULE_LICENSE("GPL v2");