1 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
14 #include <linux/module.h>
15 #include <linux/workqueue.h>
16 #include <linux/delay.h>
17 #include <linux/types.h>
18 #include <linux/list.h>
19 #include <linux/ioctl.h>
20 #include <linux/spinlock.h>
21 #include <linux/proc_fs.h>
22 #include <linux/atomic.h>
23 #include <linux/videodev2.h>
24 #include <linux/msm_ion.h>
25 #include <linux/iommu.h>
26 #include <linux/platform_device.h>
27 #include <linux/debugfs.h>
28 #include <media/v4l2-fh.h>
32 #include "cam_hw_ops.h"
33 #include <media/msmb_generic_buf_mgr.h>
/*
 * Module-scope state for the MSM camera "config" video node.
 * NOTE(review): the embedded line numbers in this listing are
 * non-contiguous (35, 36, 37, 38, 40, ...), so some original lines are
 * elided from this excerpt; confirm against the full msm.c source.
 */
35 static struct v4l2_device *msm_v4l2_dev;
36 static struct list_head ordered_sd_list; /* subdevs ordered by close_seq */
37 static struct mutex ordered_sd_mtx; /* guards ordered_sd_list walks */
38 static struct mutex v4l2_event_mtx; /* serializes event (un)subscribe */
40 static atomic_t qos_add_request_done = ATOMIC_INIT(0); /* one-shot latch */
41 static struct pm_qos_request msm_v4l2_pm_qos_request;
43 static struct msm_queue_head *msm_session_q; /* all active sessions */
45 /* This variable represents daemon status:
46 * true = daemon present (default state)
47 * false = daemon is NOT present
49 bool is_daemon_status = true;
51 /* config node event queue (the daemon's v4l2 file handle) */
52 static struct v4l2_fh *msm_eventq;
53 spinlock_t msm_eventq_lock;
55 static struct pid *msm_pid; /* pid of the process holding the config node */
56 spinlock_t msm_pid_lock;
58 static uint32_t gpu_limit; /* optional GPU freq cap from DT "qcom,gpu-limit" */
61 * It takes 20 bytes + NULL character to write the
62 * largest decimal value of an uint64_t
64 #define LOGSYNC_PACKET_SIZE 21
/*
 * Type-generic queue helpers. Each macro takes the queue's spinlock with
 * IRQs disabled for the duration of the list walk/update:
 *  - msm_dequeue:          pop the first entry (NULL when empty);
 *  - msm_delete_sd_entry:  unlink the entry whose ->sd equals q_node;
 *  - msm_delete_entry:     unlink the entry equal to q_node itself;
 *  - msm_queue_drain:      unlink every entry;
 *  - msm_queue_traverse_action: call func(node, data) on each entry
 *    while the lock is held (func must not sleep or re-take the lock);
 *  - msm_queue_find:       return the first entry for which
 *    func(node, data) is non-zero.
 * NOTE(review): several macro lines are elided from this listing (the
 * "node" declarations, the type arguments to list_first_entry, and the
 * closing ")}"/"} while (0)" lines) — the embedded numbering jumps.
 * No comments are interleaved below because every body line is part of
 * a backslash-continued macro. Consult the full source for the bodies.
 */
66 #define msm_dequeue(queue, type, member) ({ \
67 unsigned long flags; \
68 struct msm_queue_head *__q = (queue); \
70 spin_lock_irqsave(&__q->lock, flags); \
71 if (!list_empty(&__q->list)) { \
73 node = list_first_entry(&__q->list, \
75 if ((node) && (&node->member) && (&node->member.next)) \
76 list_del_init(&node->member); \
78 spin_unlock_irqrestore(&__q->lock, flags); \
82 #define msm_delete_sd_entry(queue, type, member, q_node) ({ \
83 unsigned long flags; \
84 struct msm_queue_head *__q = (queue); \
86 spin_lock_irqsave(&__q->lock, flags); \
87 if (!list_empty(&__q->list)) { \
88 list_for_each_entry(node, &__q->list, member) \
89 if (node->sd == q_node) { \
91 list_del_init(&node->member); \
96 spin_unlock_irqrestore(&__q->lock, flags); \
99 #define msm_delete_entry(queue, type, member, q_node) ({ \
100 unsigned long flags; \
101 struct msm_queue_head *__q = (queue); \
103 spin_lock_irqsave(&__q->lock, flags); \
104 if (!list_empty(&__q->list)) { \
105 list_for_each_entry(node, &__q->list, member) \
106 if (node == q_node) { \
108 list_del_init(&node->member); \
113 spin_unlock_irqrestore(&__q->lock, flags); \
116 #define msm_queue_drain(queue, type, member) do { \
117 unsigned long flags; \
118 struct msm_queue_head *__q = (queue); \
120 spin_lock_irqsave(&__q->lock, flags); \
121 while (!list_empty(&__q->list)) { \
123 node = list_first_entry(&__q->list, \
127 list_del_init(&node->member); \
131 spin_unlock_irqrestore(&__q->lock, flags); \
134 typedef int (*msm_queue_func)(void *d1, void *d2);
135 #define msm_queue_traverse_action(queue, type, member, func, data) do {\
136 unsigned long flags; \
137 struct msm_queue_head *__q = (queue); \
139 msm_queue_func __f = (func); \
140 spin_lock_irqsave(&__q->lock, flags); \
141 if (!list_empty(&__q->list)) { \
142 list_for_each_entry(node, &__q->list, member) \
147 spin_unlock_irqrestore(&__q->lock, flags); \
150 typedef int (*msm_queue_find_func)(void *d1, void *d2);
151 #define msm_queue_find(queue, type, member, func, data) ({\
152 unsigned long flags; \
153 struct msm_queue_head *__q = (queue); \
155 typeof(node) __ret = NULL; \
156 msm_queue_find_func __f = (func); \
157 spin_lock_irqsave(&__q->lock, flags); \
158 if (!list_empty(&__q->list)) { \
159 list_for_each_entry(node, &__q->list, member) \
160 if ((__f) && __f(node, data)) { \
165 spin_unlock_irqrestore(&__q->lock, flags); \
/* Initialize a queue head: empty list plus its protecting spinlock. */
169 static void msm_init_queue(struct msm_queue_head *qhead)
173 INIT_LIST_HEAD(&qhead->list);
174 spin_lock_init(&qhead->lock);
/*
 * Append an entry under the queue lock, tracking the high-water mark.
 * NOTE(review): the line incrementing qhead->len appears elided from
 * this listing (numbering jumps 183->185); confirm against full source.
 */
179 static void msm_enqueue(struct msm_queue_head *qhead,
180 struct list_head *entry)
183 spin_lock_irqsave(&qhead->lock, flags);
185 if (qhead->len > qhead->max)
186 qhead->max = qhead->len;
187 list_add_tail(entry, &qhead->list);
188 spin_unlock_irqrestore(&qhead->lock, flags);
/* Copy the stock v4l2 subdev file operations into d1 (struct copy). */
191 void msm_cam_copy_v4l2_subdev_fops(struct v4l2_file_operations *d1)
193 *d1 = v4l2_subdev_fops;
195 EXPORT_SYMBOL(msm_cam_copy_v4l2_subdev_fops);
/* Return a pointer to the stock v4l2 subdev file operations. */
197 static const struct v4l2_file_operations *msm_cam_get_v4l2_subdev_fops_ptr(
200 return &v4l2_subdev_fops;
/*
 * msm_queue_find() predicates: d1 is the candidate node, d2 points at
 * the id to match. Each returns 1 on match, 0 otherwise.
 */
203 /* index = session id */
204 static inline int __msm_queue_find_session(void *d1, void *d2)
206 struct msm_session *session = d1;
207 return (session->session_id == *(unsigned int *)d2) ? 1 : 0;
210 static inline int __msm_queue_find_stream(void *d1, void *d2)
212 struct msm_stream *stream = d1;
213 return (stream->stream_id == *(unsigned int *)d2) ? 1 : 0;
216 static inline int __msm_queue_find_command_ack_q(void *d1, void *d2)
218 struct msm_command_ack *ack = d1;
219 return (ack->stream_id == *(unsigned int *)d2) ? 1 : 0;
/*
 * PM QoS plumbing for the CPU DMA latency constraint.
 * msm_pm_qos_add_request() is idempotent: the atomic cmpxchg latches
 * qos_add_request_done so pm_qos_add_request() runs at most once
 * (the elided line after the cmpxchg presumably returns early — TODO
 * confirm against the full source).
 */
222 static inline void msm_pm_qos_add_request(void)
224 pr_info("%s: add request", __func__);
225 if (atomic_cmpxchg(&qos_add_request_done, 0, 1))
227 pm_qos_add_request(&msm_v4l2_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
228 PM_QOS_DEFAULT_VALUE);
/* Drop the latency request entirely (called from msm_close). */
231 static void msm_pm_qos_remove_request(void)
233 pr_info("%s: remove request", __func__);
234 pm_qos_remove_request(&msm_v4l2_pm_qos_request);
/* Update the latency bound to val, adding the request first if needed. */
237 void msm_pm_qos_update_request(int val)
239 pr_info("%s: update request %d", __func__, val);
240 msm_pm_qos_add_request();
241 pm_qos_update_request(&msm_v4l2_pm_qos_request, val);
/* Look up a session by id in the global session queue; WARNs when absent. */
244 struct msm_session *msm_session_find(unsigned int session_id)
246 struct msm_session *session;
247 session = msm_queue_find(msm_session_q, struct msm_session,
248 list, __msm_queue_find_session, &session_id);
249 if (WARN_ON(!session))
253 EXPORT_SYMBOL(msm_session_find);
/*
 * Allocate a stream object, link it into the session's stream queue and
 * initialize its buffer list. NOTE(review): several lines are elided
 * here (the error returns and the assignment of the vb2 queue q);
 * confirm against the full source.
 */
255 int msm_create_stream(unsigned int session_id,
256 unsigned int stream_id, struct vb2_queue *q)
258 struct msm_session *session;
259 struct msm_stream *stream;
261 session = msm_queue_find(msm_session_q, struct msm_session,
262 list, __msm_queue_find_session, &session_id);
266 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
270 stream->stream_id = stream_id;
272 spin_lock_init(&stream->stream_lock);
273 msm_enqueue(&session->stream_q, &stream->list);
274 session->stream_q.len++;
276 INIT_LIST_HEAD(&stream->queued_list);
280 EXPORT_SYMBOL(msm_create_stream);
/*
 * Unlink and tear down a stream. Takes the session's stream_rwlock for
 * writing around the lookup/removal, then the stream queue spinlock for
 * the actual list surgery. Bails out (unlocking first) when the stream
 * is missing or msm_vb2_get_stream_state() reports it is not in the
 * expected state (value 1 — semantics not visible in this excerpt).
 */
282 void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
284 struct msm_session *session = NULL;
285 struct msm_stream *stream = NULL;
289 session = msm_queue_find(msm_session_q, struct msm_session,
290 list, __msm_queue_find_session, &session_id);
296 unsigned long wl_flags;
299 pr_err("%s : not able to delete stream %d\n",
304 write_lock_irqsave(&session->stream_rwlock, wl_flags);
306 stream = msm_queue_find(&session->stream_q, struct msm_stream,
307 list, __msm_queue_find_stream, &stream_id);
310 write_unlock_irqrestore(&session->stream_rwlock,
315 if (msm_vb2_get_stream_state(stream) != 1) {
316 write_unlock_irqrestore(&session->stream_rwlock,
321 spin_lock_irqsave(&(session->stream_q.lock), flags);
322 list_del_init(&stream->list);
323 session->stream_q.len--;
326 spin_unlock_irqrestore(&(session->stream_q.lock), flags);
327 write_unlock_irqrestore(&session->stream_rwlock, wl_flags);
332 EXPORT_SYMBOL(msm_delete_stream);
/* video_device release callback: recover the owning subdev from drvdata. */
334 static void msm_sd_unregister_subdev(struct video_device *vdev)
336 struct v4l2_subdev *sd = video_get_drvdata(vdev);
/*
 * Register sd with the global v4l2 device and, when it carries
 * V4L2_SUBDEV_FL_HAS_DEVNODE, also create and register a /dev video
 * node for it. NOTE(review): the cleanup/error-return lines are elided
 * from this listing; the trailing video_unregister_device(sd->devnode)
 * appears to belong to an error path.
 */
341 static inline int __msm_sd_register_subdev(struct v4l2_subdev *sd)
344 struct video_device *vdev;
346 if (!msm_v4l2_dev || !sd || !sd->name[0])
349 rc = v4l2_device_register_subdev(msm_v4l2_dev, sd);
353 /* Register a device node for every subdev marked with the
354 * V4L2_SUBDEV_FL_HAS_DEVNODE flag.
356 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
359 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
365 video_set_drvdata(vdev, sd);
366 strlcpy(vdev->name, sd->name, sizeof(vdev->name));
367 vdev->v4l2_dev = msm_v4l2_dev;
368 vdev->fops = msm_cam_get_v4l2_subdev_fops_ptr();
369 vdev->release = msm_sd_unregister_subdev;
370 rc = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
377 #if defined(CONFIG_MEDIA_CONTROLLER)
378 sd->entity.info.dev.major = VIDEO_MAJOR;
379 sd->entity.info.dev.minor = vdev->minor;
380 sd->entity.name = video_device_node_name(vdev);
387 video_unregister_device(sd->devnode);
/*
 * Insert msm_subdev into sd_list keeping it sorted by close_seq
 * (ascending): the first entry with a larger close_seq gets the new
 * node placed before it; otherwise the node is appended. Duplicate
 * insertion is rejected with an error log. Caller must hold
 * ordered_sd_mtx (see msm_sd_register).
 */
391 static void msm_add_sd_in_position(struct msm_sd_subdev *msm_subdev,
392 struct list_head *sd_list)
394 struct msm_sd_subdev *temp_sd;
396 list_for_each_entry(temp_sd, sd_list, list) {
397 if (temp_sd == msm_subdev) {
398 pr_err("%s :Fail to add the same sd %d\n",
402 if (msm_subdev->close_seq < temp_sd->close_seq) {
403 list_add_tail(&msm_subdev->list, &temp_sd->list);
407 list_add_tail(&msm_subdev->list, sd_list);
/* Public entry: order the subdev for shutdown, then register it. */
410 int msm_sd_register(struct msm_sd_subdev *msm_subdev)
412 if (WARN_ON(!msm_subdev))
415 if (WARN_ON(!msm_v4l2_dev) || WARN_ON(!msm_v4l2_dev->dev))
418 mutex_lock(&ordered_sd_mtx);
419 msm_add_sd_in_position(msm_subdev, &ordered_sd_list);
420 mutex_unlock(&ordered_sd_mtx);
421 return __msm_sd_register_subdev(&msm_subdev->sd);
423 EXPORT_SYMBOL(msm_sd_register);
/* Unregister the wrapped v4l2 subdev from the global v4l2 device. */
425 int msm_sd_unregister(struct msm_sd_subdev *msm_subdev)
427 if (WARN_ON(!msm_subdev))
430 v4l2_device_unregister_subdev(&msm_subdev->sd);
433 EXPORT_SYMBOL(msm_sd_unregister);
/*
 * Find a registered subdev by name under msm_v4l2_dev->lock.
 * NOTE(review): the lines copying the match into subdev_out and the
 * return are elided from this listing.
 */
435 static struct v4l2_subdev *msm_sd_find(const char *name)
438 struct v4l2_subdev *subdev = NULL;
439 struct v4l2_subdev *subdev_out = NULL;
441 spin_lock_irqsave(&msm_v4l2_dev->lock, flags);
442 if (!list_empty(&msm_v4l2_dev->subdevs)) {
443 list_for_each_entry(subdev, &msm_v4l2_dev->subdevs, list)
444 if (!strcmp(name, subdev->name)) {
449 spin_unlock_irqrestore(&msm_v4l2_dev->lock, flags);
/*
 * Create a session object keyed by session_id, rejecting duplicates.
 * Initializes the per-session command-ack and stream queues, its locks,
 * and (when gpu_limit is set — the surrounding condition appears elided
 * here) registers a KGSL power limit capping the GPU frequency.
 * NOTE(review): error-return lines are elided from this listing.
 */
454 int msm_create_session(unsigned int session_id, struct video_device *vdev)
456 struct msm_session *session = NULL;
458 if (!msm_session_q) {
459 pr_err("%s : session queue not available Line %d\n",
464 session = msm_queue_find(msm_session_q, struct msm_session,
465 list, __msm_queue_find_session, &session_id);
467 pr_err("%s: Session exist session_id=%d\n",
468 __func__, session_id);
472 session = kzalloc(sizeof(*session), GFP_KERNEL);
474 pr_err("%s : Memory not available Line %d\n",
479 session->session_id = session_id;
480 session->event_q.vdev = vdev;
481 msm_init_queue(&session->command_ack_q);
482 msm_init_queue(&session->stream_q);
483 msm_enqueue(msm_session_q, &session->list);
484 mutex_init(&session->lock);
485 mutex_init(&session->lock_q);
486 mutex_init(&session->close_lock);
487 rwlock_init(&session->stream_rwlock);
490 session->sysfs_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);
491 if (session->sysfs_pwr_limit)
492 kgsl_pwr_limits_set_freq(session->sysfs_pwr_limit,
498 EXPORT_SYMBOL(msm_create_session);
/*
 * Allocate a command-ack queue for (session_id, stream_id): its command
 * list, its completion used by msm_post_event() to wait for daemon ACKs,
 * and link it into the session under session->lock.
 */
500 int msm_create_command_ack_q(unsigned int session_id, unsigned int stream_id)
502 struct msm_session *session;
503 struct msm_command_ack *cmd_ack;
505 if (!msm_session_q) {
506 pr_err("%s : Session queue not available Line %d\n",
511 session = msm_queue_find(msm_session_q, struct msm_session,
512 list, __msm_queue_find_session, &session_id);
514 pr_err("%s : Session not found Line %d\n",
518 mutex_lock(&session->lock);
519 cmd_ack = kzalloc(sizeof(*cmd_ack), GFP_KERNEL);
521 mutex_unlock(&session->lock);
522 pr_err("%s : memory not available Line %d\n",
527 msm_init_queue(&cmd_ack->command_q);
528 INIT_LIST_HEAD(&cmd_ack->list);
529 init_completion(&cmd_ack->wait_complete);
530 cmd_ack->stream_id = stream_id;
532 msm_enqueue(&session->command_ack_q, &cmd_ack->list);
533 session->command_ack_q.len++;
534 mutex_unlock(&session->lock);
537 EXPORT_SYMBOL(msm_create_command_ack_q);
/*
 * Tear down the command-ack queue matching stream_id: drain any pending
 * commands, then unlink it under the queue spinlock, all while holding
 * session->lock. NOTE(review): the kfree of cmd_ack appears elided
 * (numbering jumps 562->564); confirm against the full source.
 */
539 void msm_delete_command_ack_q(unsigned int session_id, unsigned int stream_id)
541 struct msm_session *session;
542 struct msm_command_ack *cmd_ack;
545 session = msm_queue_find(msm_session_q, struct msm_session,
546 list, __msm_queue_find_session, &session_id);
549 mutex_lock(&session->lock);
551 cmd_ack = msm_queue_find(&session->command_ack_q,
552 struct msm_command_ack, list, __msm_queue_find_command_ack_q,
555 mutex_unlock(&session->lock);
559 msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
561 spin_lock_irqsave(&(session->command_ack_q.lock), flags);
562 list_del_init(&cmd_ack->list);
564 session->command_ack_q.len--;
565 spin_unlock_irqrestore(&(session->command_ack_q.lock), flags);
566 mutex_unlock(&session->lock);
568 EXPORT_SYMBOL(msm_delete_command_ack_q);
/*
 * Per-subdev shutdown: send MSM_SD_SHUTDOWN then power the subdev off.
 * NOTE(review): the line assigning sd from msm_sd appears elided here;
 * sd is used before any visible initialization in this excerpt.
 */
570 static inline int __msm_sd_close_subdevs(struct msm_sd_subdev *msm_sd,
571 struct msm_sd_close_ioctl *sd_close)
573 struct v4l2_subdev *sd;
575 pr_debug("%s: Shutting down subdev %s", __func__, sd->name);
577 v4l2_subdev_call(sd, core, ioctl, MSM_SD_SHUTDOWN, sd_close);
578 v4l2_subdev_call(sd, core, s_power, 0);
/*
 * Notify a subdev about an SOF freeze or recovery; the elided branch
 * logic presumably selects NOTIFY vs UNNOTIFY based on the caller's
 * status flag — TODO confirm against the full source.
 */
583 static inline int __msm_sd_notify_freeze_subdevs(struct msm_sd_subdev *msm_sd,
586 struct v4l2_subdev *sd;
590 v4l2_subdev_call(sd, core, ioctl, MSM_SD_NOTIFY_FREEZE, NULL);
592 v4l2_subdev_call(sd, core, ioctl, MSM_SD_UNNOTIFY_FREEZE, NULL);
/* Traverse callback: warn if a stream still has queued buffers, then
 * reset its queued list under the stream lock. */
597 static inline int __msm_destroy_session_streams(void *d1, void *d2)
599 struct msm_stream *stream = d1;
602 pr_err("%s: Error: Destroyed list is not empty\n", __func__);
603 spin_lock_irqsave(&stream->stream_lock, flags);
604 INIT_LIST_HEAD(&stream->queued_list);
605 spin_unlock_irqrestore(&stream->stream_lock, flags);
/* Reset every stream of the session, then drop them from the queue. */
609 static void msm_destroy_session_streams(struct msm_session *session)
615 msm_queue_traverse_action(&session->stream_q, struct msm_stream, list,
616 __msm_destroy_session_streams, NULL);
618 msm_queue_drain(&session->stream_q, struct msm_stream, list);
/* Traverse callback: drop all pending commands of one ack queue. */
621 static inline int __msm_remove_session_cmd_ack_q(void *d1, void *d2)
623 struct msm_command_ack *cmd_ack = d1;
625 msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
/* Drain every command-ack queue of the session under session->lock. */
630 static void msm_remove_session_cmd_ack_q(struct msm_session *session)
635 mutex_lock(&session->lock);
636 /* For error handling, all subdevs that are connected to streams
637 * need to be detached. */
638 msm_queue_traverse_action(&session->command_ack_q,
639 struct msm_command_ack, list,
640 __msm_remove_session_cmd_ack_q, NULL);
642 msm_queue_drain(&session->command_ack_q, struct msm_command_ack, list);
644 mutex_unlock(&session->lock);
/*
 * Full session teardown: release the optional KGSL power limit, destroy
 * streams and command-ack queues, destroy the session locks, unlink the
 * session from the global queue, and finally tell the buffer manager
 * subdev ("msm_buf_mngr") to shut down this session's buffers.
 * NOTE(review): error returns and the session kfree are elided here.
 */
647 int msm_destroy_session(unsigned int session_id)
649 struct msm_session *session;
650 struct v4l2_subdev *buf_mgr_subdev;
651 struct msm_sd_close_ioctl session_info;
653 session = msm_queue_find(msm_session_q, struct msm_session,
654 list, __msm_queue_find_session, &session_id);
658 if (gpu_limit && session->sysfs_pwr_limit) {
659 kgsl_pwr_limits_set_default(session->sysfs_pwr_limit);
660 kgsl_pwr_limits_del(session->sysfs_pwr_limit);
663 msm_destroy_session_streams(session);
664 msm_remove_session_cmd_ack_q(session);
665 mutex_destroy(&session->lock);
666 mutex_destroy(&session->lock_q);
667 mutex_destroy(&session->close_lock);
668 msm_delete_entry(msm_session_q, struct msm_session,
670 buf_mgr_subdev = msm_sd_find("msm_buf_mngr");
671 if (buf_mgr_subdev) {
672 session_info.session = session_id;
673 session_info.stream = 0;
674 v4l2_subdev_call(buf_mgr_subdev, core, ioctl,
675 MSM_SD_SHUTDOWN, &session_info);
677 pr_err("%s: Buff manger device node is NULL\n", __func__);
682 EXPORT_SYMBOL(msm_destroy_session);
/* Traverse callback: queue a PRIV_SHUTDOWN v4l2 event to the session's
 * event node so user space (HAL) learns the daemon is going away. */
684 static int __msm_close_destry_session_notify_apps(void *d1, void *d2)
686 struct v4l2_event event;
687 struct msm_v4l2_event_data *event_data =
688 (struct msm_v4l2_event_data *)&event.u.data[0];
689 struct msm_session *session = d1;
691 event.type = MSM_CAMERA_V4L2_EVENT_TYPE;
692 event.id = MSM_CAMERA_MSM_NOTIFY;
693 event_data->command = MSM_CAMERA_PRIV_SHUTDOWN;
695 v4l2_event_queue(session->event_q.vdev, &event);
/* Complete the ack queue matching this stream so any msm_post_event()
 * waiter stops blocking (the stream_id argument line is elided). */
700 static int __msm_wakeup_all_cmdack_session_stream(void *d1, void *d2)
702 struct msm_stream *stream = d1;
703 struct msm_session *session = d2;
704 struct msm_command_ack *cmd_ack = NULL;
705 unsigned long spin_flags = 0;
707 cmd_ack = msm_queue_find(&session->command_ack_q,
708 struct msm_command_ack, list,
709 __msm_queue_find_command_ack_q,
712 spin_lock_irqsave(&(session->command_ack_q.lock),
714 complete(&cmd_ack->wait_complete);
715 spin_unlock_irqrestore(&(session->command_ack_q.lock),
/* Traverse callback: wake every cmd-ack waiter of one session by
 * walking its streams with the stream-matching helper above. */
721 static int __msm_close_wakeup_all_cmdack_session(void *d1, void *d2)
723 struct msm_stream *stream = NULL;
724 struct msm_session *session = d1;
726 stream = msm_queue_find(&session->stream_q, struct msm_stream,
727 list, __msm_wakeup_all_cmdack_session_stream, d1);
/*
 * vidioc_default handler for the config node — the daemon drives the
 * driver through these private ioctls:
 *  - DAEMON_DISABLED: clears is_daemon_status and returns early;
 *  - NOTIFY:          forward an event to the session's event queue;
 *  - CMD_ACK:         enqueue the daemon's reply and complete the
 *                     waiter blocked in msm_post_event();
 *  - NOTIFY_DEBUG:    fan SOF freeze/recover notifications out to all
 *                     ordered subdevs;
 *  - NOTIFY_ERROR:    broadcast a shutdown event to every session.
 * NOTE(review): the switch statements, returns and error paths are
 * heavily elided in this listing (embedded numbering jumps throughout);
 * treat the flow description above as a reconstruction to confirm.
 */
731 static long msm_private_ioctl(struct file *file, void *fh,
732 bool valid_prio, unsigned int cmd, void *arg)
735 struct msm_v4l2_event_data *event_data = arg;
736 struct v4l2_event event;
737 struct msm_session *session;
738 unsigned int session_id;
739 unsigned int stream_id;
740 unsigned long spin_flags = 0;
741 struct msm_sd_subdev *msm_sd;
743 if (cmd == MSM_CAM_V4L2_IOCTL_DAEMON_DISABLED) {
744 is_daemon_status = false;
752 case MSM_CAM_V4L2_IOCTL_NOTIFY:
753 case MSM_CAM_V4L2_IOCTL_CMD_ACK:
754 case MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG:
755 case MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR:
761 memset(&event, 0, sizeof(struct v4l2_event));
762 session_id = event_data->session_id;
763 stream_id = event_data->stream_id;
765 session = msm_queue_find(msm_session_q, struct msm_session,
766 list, __msm_queue_find_session, &session_id);
772 case MSM_CAM_V4L2_IOCTL_NOTIFY: {
773 if (WARN_ON(!session->event_q.vdev)) {
777 event.type = event_data->v4l2_event_type;
778 event.id = event_data->v4l2_event_id;
779 memcpy(&event.u.data, event_data,
780 sizeof(struct msm_v4l2_event_data));
781 v4l2_event_queue(session->event_q.vdev,
786 case MSM_CAM_V4L2_IOCTL_CMD_ACK: {
787 struct msm_command_ack *cmd_ack;
788 struct msm_command *ret_cmd;
790 ret_cmd = kzalloc(sizeof(*ret_cmd), GFP_KERNEL);
796 cmd_ack = msm_queue_find(&session->command_ack_q,
797 struct msm_command_ack, list,
798 __msm_queue_find_command_ack_q,
800 if (WARN_ON(!cmd_ack)) {
/* Publish the ack and wake the waiter under the ack-queue lock so the
 * completion and the enqueued command are observed together. */
806 spin_lock_irqsave(&(session->command_ack_q.lock),
808 event.type = event_data->v4l2_event_type;
809 event.id = event_data->v4l2_event_id;
810 memcpy(&event.u.data, event_data,
811 sizeof(struct msm_v4l2_event_data));
812 memcpy(&ret_cmd->event, &event, sizeof(struct v4l2_event));
813 msm_enqueue(&cmd_ack->command_q, &ret_cmd->list);
814 complete(&cmd_ack->wait_complete);
815 spin_unlock_irqrestore(&(session->command_ack_q.lock),
820 case MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG: {
821 if (event_data->status) {
822 pr_err("%s:Notifying subdevs about potential sof freeze\n",
825 pr_err("%s:Notifying subdevs about sof recover\n",
829 mutex_lock(&ordered_sd_mtx);
830 if (!list_empty(&msm_v4l2_dev->subdevs)) {
831 list_for_each_entry(msm_sd, &ordered_sd_list, list)
832 __msm_sd_notify_freeze_subdevs(msm_sd,
835 mutex_unlock(&ordered_sd_mtx);
839 case MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR:
840 /* send v4l2_event to HAL next*/
841 msm_queue_traverse_action(msm_session_q,
842 struct msm_session, list,
843 __msm_close_destry_session_notify_apps, NULL);
/* Event (un)subscription, serialized by v4l2_event_mtx. */
854 static int msm_unsubscribe_event(struct v4l2_fh *fh,
855 const struct v4l2_event_subscription *sub)
859 mutex_lock(&v4l2_event_mtx);
860 rc = v4l2_event_unsubscribe(fh, sub);
861 mutex_unlock(&v4l2_event_mtx);
/* Subscribe with a 5-deep per-fh event backlog. */
866 static int msm_subscribe_event(struct v4l2_fh *fh,
867 const struct v4l2_event_subscription *sub)
871 mutex_lock(&v4l2_event_mtx);
872 rc = v4l2_event_subscribe(fh, sub, 5, NULL);
873 mutex_unlock(&v4l2_event_mtx);
/* ioctl ops for the config node: events plus the private handler. */
878 static const struct v4l2_ioctl_ops g_msm_ioctl_ops = {
879 .vidioc_subscribe_event = msm_subscribe_event,
880 .vidioc_unsubscribe_event = msm_unsubscribe_event,
881 .vidioc_default = msm_private_ioctl,
/* poll() for the config node: readable when a v4l2 event is pending. */
884 static unsigned int msm_poll(struct file *f,
885 struct poll_table_struct *pll_table)
888 struct v4l2_fh *eventq = f->private_data;
892 poll_wait(f, &eventq->wait, pll_table);
894 if (v4l2_event_pending(eventq))
895 rc = POLLIN | POLLRDNORM;
/* Dump an event's type/id/command and session/stream/arg for debugging. */
900 static void msm_print_event_error(struct v4l2_event *event)
902 struct msm_v4l2_event_data *event_data =
903 (struct msm_v4l2_event_data *)&event->u.data[0];
905 pr_err("Evt_type=%x Evt_id=%d Evt_cmd=%x\n", event->type,
906 event->id, event_data->command);
907 pr_err("Evt_session_id=%d Evt_stream_id=%d Evt_arg=%d\n",
908 event_data->session_id, event_data->stream_id,
909 event_data->arg_value);
/*
 * Post an event to the user-space imaging daemon and wait (up to
 * timeout ms) for its ACK: look up the session and its per-stream
 * command-ack queue, re-arm the completion, queue the event to the
 * daemon's event node, then block on the completion. On wake-up the
 * replied command is dequeued, sanity-checked against the request
 * (type/id must match) and copied back into *event.
 * NOTE(review): the returns and several branch/brace lines are elided
 * from this listing (e.g. the msm_eventq NULL check implied before
 * line 932, the negative-timeout branch around 966, and the kfree of
 * cmd); confirm against the full source.
 */
912 /* something seriously wrong if msm_close is triggered
913 * !!! user space imaging server is shutdown !!!
915 int msm_post_event(struct v4l2_event *event, int timeout)
918 struct video_device *vdev;
919 struct msm_session *session;
920 struct msm_v4l2_event_data *event_data =
921 (struct msm_v4l2_event_data *)&event->u.data[0];
922 struct msm_command_ack *cmd_ack;
923 struct msm_command *cmd;
924 int session_id, stream_id;
925 unsigned long flags = 0;
927 session_id = event_data->session_id;
928 stream_id = event_data->stream_id;
930 spin_lock_irqsave(&msm_eventq_lock, flags);
932 spin_unlock_irqrestore(&msm_eventq_lock, flags);
933 pr_err("%s : msm event queue not available Line %d\n",
937 spin_unlock_irqrestore(&msm_eventq_lock, flags);
939 vdev = msm_eventq->vdev;
941 /* send to imaging server and wait for ACK */
942 session = msm_queue_find(msm_session_q, struct msm_session,
943 list, __msm_queue_find_session, &session_id);
944 if (WARN_ON(!session)) {
945 pr_err("%s : session not found Line %d\n",
949 mutex_lock(&session->lock);
950 cmd_ack = msm_queue_find(&session->command_ack_q,
951 struct msm_command_ack, list,
952 __msm_queue_find_command_ack_q, &stream_id);
953 if (WARN_ON(!cmd_ack)) {
954 mutex_unlock(&session->lock);
955 pr_err("%s : cmd_ack not found Line %d\n",
960 /*re-init wait_complete */
961 reinit_completion(&cmd_ack->wait_complete);
963 v4l2_event_queue(vdev, event);
966 mutex_unlock(&session->lock);
967 pr_debug("%s : timeout cannot be negative Line %d\n",
972 /* should wait on session based condition */
973 rc = wait_for_completion_timeout(&cmd_ack->wait_complete,
974 msecs_to_jiffies(timeout));
977 if (list_empty_careful(&cmd_ack->command_q.list)) {
979 pr_err("%s: Timed out\n", __func__);
980 msm_print_event_error(event);
981 mutex_unlock(&session->lock);
984 pr_err("%s: Error: No timeout but list empty!",
986 msm_print_event_error(event);
987 mutex_unlock(&session->lock);
992 cmd = msm_dequeue(&cmd_ack->command_q,
993 struct msm_command, list);
995 mutex_unlock(&session->lock);
996 pr_err("%s : cmd dequeue failed Line %d\n",
1001 event_data = (struct msm_v4l2_event_data *)cmd->event.u.data;
1003 /* compare cmd_ret and event */
1004 if (WARN_ON(event->type != cmd->event.type) ||
1005 WARN_ON(event->id != cmd->event.id)) {
1006 pr_err("%s : Either event type or id didnot match Line %d\n",
1007 __func__, __LINE__);
1008 pr_err("%s : event->type %d event->id %d\n", __func__,
1009 event->type, event->id);
1010 pr_err("%s : cmd->event.type %d cmd->event.id %d\n", __func__,
1011 cmd->event.type, cmd->event.id);
1015 *event = cmd->event;
1018 mutex_unlock(&session->lock);
1021 EXPORT_SYMBOL(msm_post_event);
/*
 * Release handler for the config node (the daemon exiting): shut every
 * subdev down in close_seq order, drop the PM QoS request, notify all
 * sessions' apps of the shutdown, wake any msm_post_event() waiters,
 * release the event fh and the daemon pid reference, and clear the
 * single-open flag. NOTE(review): the lines clearing msm_eventq and
 * putting msm_pid appear elided (numbering jumps 1048->1050 and
 * 1053->1056); confirm against the full source.
 */
1023 static int msm_close(struct file *filep)
1026 unsigned long flags;
1027 struct msm_video_device *pvdev = video_drvdata(filep);
1028 struct msm_sd_close_ioctl sd_close;
1029 struct msm_sd_subdev *msm_sd;
1031 /*stop all hardware blocks immediately*/
1032 mutex_lock(&ordered_sd_mtx);
1033 if (!list_empty(&msm_v4l2_dev->subdevs))
1034 list_for_each_entry(msm_sd, &ordered_sd_list, list)
1035 __msm_sd_close_subdevs(msm_sd, &sd_close);
1036 mutex_unlock(&ordered_sd_mtx);
1038 /* remove msm_v4l2_pm_qos_request */
1039 msm_pm_qos_remove_request();
1041 /* send v4l2_event to HAL next*/
1042 msm_queue_traverse_action(msm_session_q, struct msm_session, list,
1043 __msm_close_destry_session_notify_apps, NULL);
1045 msm_queue_traverse_action(msm_session_q, struct msm_session, list,
1046 __msm_close_wakeup_all_cmdack_session, NULL);
1048 spin_lock_irqsave(&msm_eventq_lock, flags);
1050 spin_unlock_irqrestore(&msm_eventq_lock, flags);
1051 v4l2_fh_release(filep);
1053 spin_lock_irqsave(&msm_pid_lock, flags);
1056 spin_unlock_irqrestore(&msm_pid_lock, flags);
1058 atomic_set(&pvdev->opened, 0);
/* Splice helper: swap the positions of l1 and l2 within their list. */
1063 static inline void msm_list_switch(struct list_head *l1,
1064 struct list_head *l2)
1066 l1->next = l2->next;
1067 l2->prev = l1->prev;
1068 l1->prev->next = l2;
1069 l2->next->prev = l1;
/*
 * Open handler for the config node. Only a single open is permitted
 * (atomic cmpxchg on pvdev->opened); records the opener's pid, creates
 * the v4l2 event fh that becomes the global msm_eventq, and installs
 * the PM QoS request.
 */
1074 static int msm_open(struct file *filep)
1077 unsigned long flags;
1078 struct msm_video_device *pvdev = video_drvdata(filep);
1081 /* !!! only ONE open is allowed !!! */
1082 if (atomic_cmpxchg(&pvdev->opened, 0, 1))
1085 spin_lock_irqsave(&msm_pid_lock, flags);
1086 msm_pid = get_pid(task_pid(current));
1087 spin_unlock_irqrestore(&msm_pid_lock, flags);
1089 /* create event queue */
1090 rc = v4l2_fh_open(filep);
1094 spin_lock_irqsave(&msm_eventq_lock, flags);
1095 msm_eventq = filep->private_data;
1096 spin_unlock_irqrestore(&msm_eventq_lock, flags);
1098 /* register msm_v4l2_pm_qos_request */
1099 msm_pm_qos_add_request();
/* File operations for the config node (open/poll entries elided here). */
1104 static struct v4l2_file_operations msm_fops = {
1105 .owner = THIS_MODULE,
1108 .release = msm_close,
1109 .unlocked_ioctl = video_ioctl2,
1110 #ifdef CONFIG_COMPAT
1111 .compat_ioctl32 = video_ioctl2,
/* Look up a session by id; ERR_PTR(-EINVAL) when not found. */
1115 struct msm_session *msm_get_session(unsigned int session_id)
1117 struct msm_session *session;
1119 session = msm_queue_find(msm_session_q, struct msm_session,
1120 list, __msm_queue_find_session, &session_id);
1122 return ERR_PTR(-EINVAL);
1126 EXPORT_SYMBOL(msm_get_session);
/* Look up a stream by id within a session; ERR_PTR(-EINVAL) on miss. */
1129 struct msm_stream *msm_get_stream(struct msm_session *session,
1130 unsigned int stream_id)
1132 struct msm_stream *stream;
1134 stream = msm_queue_find(&session->stream_q, struct msm_stream,
1135 list, __msm_queue_find_stream, &stream_id);
1138 return ERR_PTR(-EINVAL);
1142 EXPORT_SYMBOL(msm_get_stream);
/* Resolve (session_id, stream_id) to the stream's vb2 queue pointer.
 * Error-return lines for the two failed lookups are elided here. */
1144 struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id,
1145 unsigned int stream_id)
1147 struct msm_session *session;
1148 struct msm_stream *stream;
1150 session = msm_queue_find(msm_session_q, struct msm_session,
1151 list, __msm_queue_find_session, &session_id);
1155 stream = msm_queue_find(&session->stream_q, struct msm_stream,
1156 list, __msm_queue_find_stream, &stream_id);
1160 return stream->vb2_q;
1162 EXPORT_SYMBOL(msm_get_stream_vb2q);
/*
 * Reverse lookup: scan every session's streams for the one owning q,
 * dropping both locks before returning the match (the return statements
 * themselves are elided from this listing).
 */
1164 struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q)
1166 struct msm_session *session;
1167 struct msm_stream *stream;
1168 unsigned long flags1;
1169 unsigned long flags2;
1170 spin_lock_irqsave(&msm_session_q->lock, flags1);
1171 list_for_each_entry(session, &(msm_session_q->list), list) {
1172 spin_lock_irqsave(&(session->stream_q.lock), flags2);
1173 list_for_each_entry(
1174 stream, &(session->stream_q.list), list) {
1175 if (stream->vb2_q == q) {
1176 spin_unlock_irqrestore
1177 (&(session->stream_q.lock), flags2);
1178 spin_unlock_irqrestore
1179 (&msm_session_q->lock, flags1);
1183 spin_unlock_irqrestore(&(session->stream_q.lock), flags2);
1185 spin_unlock_irqrestore(&msm_session_q->lock, flags1);
1188 EXPORT_SYMBOL(msm_get_stream_from_vb2q);
/* Same scan as above but returning the owning session instead. */
1190 struct msm_session *msm_get_session_from_vb2q(struct vb2_queue *q)
1192 struct msm_session *session;
1193 struct msm_stream *stream;
1194 unsigned long flags1;
1195 unsigned long flags2;
1197 spin_lock_irqsave(&msm_session_q->lock, flags1);
1198 list_for_each_entry(session, &(msm_session_q->list), list) {
1199 spin_lock_irqsave(&(session->stream_q.lock), flags2);
1200 list_for_each_entry(
1201 stream, &(session->stream_q.list), list) {
1202 if (stream->vb2_q == q) {
1203 spin_unlock_irqrestore
1204 (&(session->stream_q.lock), flags2);
1205 spin_unlock_irqrestore
1206 (&msm_session_q->lock, flags1);
1210 spin_unlock_irqrestore(&(session->stream_q.lock), flags2);
1212 spin_unlock_irqrestore(&msm_session_q->lock, flags1);
1215 EXPORT_SYMBOL(msm_get_session_from_vb2q);
1218 #ifdef CONFIG_COMPAT
/*
 * Copy a 32-bit user-space msm_camera_private_ioctl_arg into the kernel
 * struct, converting the embedded user pointer with compat_ptr().
 * Returns non-zero (elided lines) when arguments are NULL or the user
 * copy fails.
 */
1219 long msm_copy_camera_private_ioctl_args(unsigned long arg,
1220 struct msm_camera_private_ioctl_arg *k_ioctl,
1221 void __user **tmp_compat_ioctl_ptr)
1223 struct msm_camera_private_ioctl_arg up_ioctl;
1225 if (WARN_ON(!arg || !k_ioctl || !tmp_compat_ioctl_ptr))
1228 if (copy_from_user(&up_ioctl,
1229 (struct msm_camera_private_ioctl_arg *)arg,
1230 sizeof(struct msm_camera_private_ioctl_arg)))
1233 k_ioctl->id = up_ioctl.id;
1234 k_ioctl->size = up_ioctl.size;
1235 k_ioctl->result = up_ioctl.result;
1236 k_ioctl->reserved = up_ioctl.reserved;
1237 *tmp_compat_ioctl_ptr = compat_ptr(up_ioctl.ioctl_ptr);
1241 EXPORT_SYMBOL(msm_copy_camera_private_ioctl_args);
/*
 * v4l2_device notify hook: lets one subdev ask the framework for
 * another subdev by name (GET_SD/PUT_SD) or request a vb2 callback
 * binding (REQ_CB). The requesting subdev must itself be registered.
 */
1244 static void msm_sd_notify(struct v4l2_subdev *sd,
1245 unsigned int notification, void *arg)
1248 struct v4l2_subdev *subdev = NULL;
1253 /* Check if subdev exists before processing*/
1254 if (!msm_sd_find(sd->name))
1257 switch (notification) {
1258 case MSM_SD_NOTIFY_GET_SD: {
1259 struct msm_sd_req_sd *get_sd = arg;
1261 get_sd->subdev = msm_sd_find(get_sd->name);
1262 /* TODO: might need to add ref count on ret_sd */
1266 case MSM_SD_NOTIFY_PUT_SD: {
1267 struct msm_sd_req_sd *put_sd = arg;
1268 subdev = msm_sd_find(put_sd->name);
1272 case MSM_SD_NOTIFY_REQ_CB: {
1273 struct msm_sd_req_vb2_q *req_sd = arg;
1274 rc = msm_vb2_request_cb(req_sd);
/*
 * debugfs "logsync" write handler: parse a decimal uint64 sequence
 * number from user space (buffer bounded by LOGSYNC_PACKET_SIZE) and
 * log it, so kernel and user logs can be correlated.
 */
1285 static ssize_t write_logsync(struct file *file, const char __user *buf,
1286 size_t count, loff_t *ppos)
1288 char lbuf[LOGSYNC_PACKET_SIZE] = {0};
1289 uint64_t seq_num = 0;
1292 if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
1295 ret = sscanf(lbuf, "%llu", &seq_num);
1297 pr_err("LOGSYNC (Kernel): Bad or malformed sequence number\n");
1299 pr_debug("LOGSYNC (Kernel): seq_num = %llu\n", seq_num);
/* Write-only fops for the logsync debugfs file. */
1305 static const struct file_operations logsync_fops = {
1306 .write = write_logsync,
/*
 * Platform probe: build the whole driver skeleton —
 *  1. allocate the global v4l2_device, the msm_video_device wrapper and
 *     its video_device;
 *  2. (media controller) allocate/register the media_device and init
 *     the video node's media entity;
 *  3. register the v4l2_device with msm_sd_notify as its notify hook,
 *     then register the "msm-config" video node with msm_fops and
 *     g_msm_ioctl_ops;
 *  4. allocate/init the global session queue, locks and subdev list;
 *  5. create the (non-fatal) logsync debugfs file, init AHB clocks and
 *     read the optional "qcom,gpu-limit" DT property.
 * The trailing unregister/free statements are the unwind (goto-cleanup)
 * error path. NOTE(review): the goto labels, returns and several brace
 * lines are elided from this listing; confirm ordering against the
 * full source.
 */
1309 static int msm_probe(struct platform_device *pdev)
1311 struct msm_video_device *pvdev = NULL;
1312 static struct dentry *cam_debugfs_root;
1315 msm_v4l2_dev = kzalloc(sizeof(*msm_v4l2_dev),
1317 if (WARN_ON(!msm_v4l2_dev)) {
1322 pvdev = kzalloc(sizeof(struct msm_video_device),
1324 if (WARN_ON(!pvdev)) {
1329 pvdev->vdev = video_device_alloc();
1330 if (WARN_ON(!pvdev->vdev)) {
1335 #if defined(CONFIG_MEDIA_CONTROLLER)
1336 msm_v4l2_dev->mdev = kzalloc(sizeof(struct media_device),
1338 if (!msm_v4l2_dev->mdev) {
1342 strlcpy(msm_v4l2_dev->mdev->model, MSM_CONFIGURATION_NAME,
1343 sizeof(msm_v4l2_dev->mdev->model));
1344 msm_v4l2_dev->mdev->dev = &(pdev->dev);
1346 rc = media_device_register(msm_v4l2_dev->mdev);
1347 if (WARN_ON(rc < 0))
1350 if (WARN_ON((rc = media_entity_init(&pvdev->vdev->entity,
1354 pvdev->vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
1355 pvdev->vdev->entity.group_id = QCAMERA_VNODE_GROUP_ID;
1358 msm_v4l2_dev->notify = msm_sd_notify;
1360 pvdev->vdev->v4l2_dev = msm_v4l2_dev;
1362 rc = v4l2_device_register(&(pdev->dev), pvdev->vdev->v4l2_dev);
1363 if (WARN_ON(rc < 0))
1366 strlcpy(pvdev->vdev->name, "msm-config", sizeof(pvdev->vdev->name));
1367 pvdev->vdev->release = video_device_release;
1368 pvdev->vdev->fops = &msm_fops;
1369 pvdev->vdev->ioctl_ops = &g_msm_ioctl_ops;
1370 pvdev->vdev->minor = -1;
1371 pvdev->vdev->vfl_type = VFL_TYPE_GRABBER;
1372 rc = video_register_device(pvdev->vdev,
1373 VFL_TYPE_GRABBER, -1);
1374 if (WARN_ON(rc < 0))
1377 #if defined(CONFIG_MEDIA_CONTROLLER)
1378 /* FIXME: How to get rid of this messy? */
1379 pvdev->vdev->entity.name = video_device_node_name(pvdev->vdev);
1382 atomic_set(&pvdev->opened, 0);
1383 video_set_drvdata(pvdev->vdev, pvdev);
1385 msm_session_q = kzalloc(sizeof(*msm_session_q), GFP_KERNEL);
1386 if (WARN_ON(!msm_session_q))
1389 msm_init_queue(msm_session_q);
1390 spin_lock_init(&msm_eventq_lock);
1391 spin_lock_init(&msm_pid_lock);
1392 mutex_init(&ordered_sd_mtx);
1393 mutex_init(&v4l2_event_mtx);
1394 INIT_LIST_HEAD(&ordered_sd_list);
1396 cam_debugfs_root = debugfs_create_dir(MSM_CAM_LOGSYNC_FILE_BASEDIR,
1398 if (!cam_debugfs_root) {
1399 pr_warn("NON-FATAL: failed to create logsync base directory\n");
1401 if (!debugfs_create_file(MSM_CAM_LOGSYNC_FILE_NAME,
1406 pr_warn("NON-FATAL: failed to create logsync debugfs file\n");
1409 rc = cam_ahb_clk_init(pdev);
1411 pr_err("%s: failed to register ahb clocks\n", __func__);
1415 of_property_read_u32(pdev->dev.of_node,
1416 "qcom,gpu-limit", &gpu_limit);
1421 v4l2_device_unregister(pvdev->vdev->v4l2_dev);
1423 #if defined(CONFIG_MEDIA_CONTROLLER)
1424 media_entity_cleanup(&pvdev->vdev->entity);
1426 media_device_unregister(msm_v4l2_dev->mdev);
1428 kzfree(msm_v4l2_dev->mdev);
1431 video_device_release(pvdev->vdev);
1435 kzfree(msm_v4l2_dev);
/* Device-tree match table and platform-driver/module boilerplate. */
1440 static const struct of_device_id msm_dt_match[] = {
1441 {.compatible = "qcom,msm-cam"},
1444 MODULE_DEVICE_TABLE(of, msm_dt_match);
1446 static struct platform_driver msm_driver = {
1450 .owner = THIS_MODULE,
1451 .of_match_table = msm_dt_match,
1455 static int __init msm_init(void)
1457 return platform_driver_register(&msm_driver);
1460 static void __exit msm_exit(void)
1462 platform_driver_unregister(&msm_driver);
1466 module_init(msm_init);
1467 module_exit(msm_exit);
1468 MODULE_DESCRIPTION("MSM V4L2 Camera");
1469 MODULE_LICENSE("GPL v2");