1 /* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
14 #include <linux/module.h>
15 #include <linux/workqueue.h>
16 #include <linux/delay.h>
17 #include <linux/types.h>
18 #include <linux/list.h>
19 #include <linux/ioctl.h>
20 #include <linux/spinlock.h>
21 #include <linux/proc_fs.h>
22 #include <linux/atomic.h>
23 #include <linux/videodev2.h>
24 #include <linux/msm_ion.h>
25 #include <linux/iommu.h>
26 #include <linux/platform_device.h>
27 #include <linux/debugfs.h>
28 #include <media/v4l2-fh.h>
32 #include "cam_hw_ops.h"
33 #include <media/ais/msm_ais_buf_mgr.h>
/* Root v4l2_device for the whole MSM camera stack; set in msm_probe(). */
36 static struct v4l2_device *msm_v4l2_dev;
/* Subdevs kept sorted by close_seq so shutdown runs in a fixed order. */
37 static struct list_head ordered_sd_list;
/* CPU DMA latency PM QoS request held while the config node is open. */
39 static struct pm_qos_request msm_v4l2_pm_qos_request;
/* Global queue of all active camera sessions (struct msm_session). */
41 static struct msm_queue_head *msm_session_q;
43 /* This variable represents daemon status
44  * true = daemon present (default state)
45  * false = daemon is NOT present
 */
47 bool is_daemon_status = true;
49 /* config node event queue (the daemon's v4l2 file handle) */
50 static struct v4l2_fh *msm_eventq;
51 static spinlock_t msm_eventq_lock;
/* PID of the process that opened the config node; guarded by msm_pid_lock. */
53 static struct pid *msm_pid;
54 static spinlock_t msm_pid_lock;
/*
57  * It takes 20 bytes + NULL character to write the
58  * largest decimal value of an uint64_t
 */
60 #define LOGSYNC_PACKET_SIZE 21
/*
 * msm_dequeue() - pop the first node of type 'type' off 'queue' under the
 * queue spinlock and evaluate to it (NULL when the queue is empty).
 * NOTE(review): "(&node->member)" and "(&node->member.next)" are address-of
 * expressions and can never be NULL; only the "node" test is meaningful.
 */
62 #define msm_dequeue(queue, type, member) ({ \
63 unsigned long flags; \
64 struct msm_queue_head *__q = (queue); \
66 spin_lock_irqsave(&__q->lock, flags); \
67 if (!list_empty(&__q->list)) { \
69 node = list_first_entry(&__q->list, \
71 if ((node) && (&node->member) && (&node->member.next)) \
72 list_del_init(&node->member); \
74 spin_unlock_irqrestore(&__q->lock, flags); \
/*
 * msm_delete_sd_entry() - under the queue lock, unlink the entry whose
 * ->sd field equals 'q_node' (subdev-keyed removal).
 */
78 #define msm_delete_sd_entry(queue, type, member, q_node) ({ \
79 unsigned long flags; \
80 struct msm_queue_head *__q = (queue); \
82 spin_lock_irqsave(&__q->lock, flags); \
83 if (!list_empty(&__q->list)) { \
84 list_for_each_entry(node, &__q->list, member) \
85 if (node->sd == q_node) { \
87 list_del_init(&node->member); \
92 spin_unlock_irqrestore(&__q->lock, flags); \
/*
 * msm_delete_entry() - under the queue lock, unlink the entry whose node
 * pointer itself equals 'q_node' (identity-keyed removal).
 */
95 #define msm_delete_entry(queue, type, member, q_node) ({ \
96 unsigned long flags; \
97 struct msm_queue_head *__q = (queue); \
99 spin_lock_irqsave(&__q->lock, flags); \
100 if (!list_empty(&__q->list)) { \
101 list_for_each_entry(node, &__q->list, member) \
102 if (node == q_node) { \
104 list_del_init(&node->member); \
109 spin_unlock_irqrestore(&__q->lock, flags); \
/*
 * msm_queue_drain() - unlink (and presumably free — the loop body lines
 * are not visible in this dump, verify against full source) every entry
 * in 'queue' under the queue spinlock.
 */
112 #define msm_queue_drain(queue, type, member) do { \
113 unsigned long flags; \
114 struct msm_queue_head *__q = (queue); \
116 spin_lock_irqsave(&__q->lock, flags); \
117 while (!list_empty(&__q->list)) { \
119 node = list_first_entry(&__q->list, \
123 list_del_init(&node->member); \
127 spin_unlock_irqrestore(&__q->lock, flags); \
/*
 * msm_queue_traverse_action() - call 'func(node, data)' for every entry in
 * 'queue', holding the queue spinlock for the whole traversal (so 'func'
 * must not sleep or re-take the queue lock).
 */
130 typedef int (*msm_queue_func)(void *d1, void *d2);
131 #define msm_queue_traverse_action(queue, type, member, func, data) do {\
132 unsigned long flags; \
133 struct msm_queue_head *__q = (queue); \
135 msm_queue_func __f = (func); \
136 spin_lock_irqsave(&__q->lock, flags); \
137 if (!list_empty(&__q->list)) { \
138 list_for_each_entry(node, &__q->list, member) \
143 spin_unlock_irqrestore(&__q->lock, flags); \
/*
 * msm_queue_find() - evaluate to the first entry for which
 * 'func(node, data)' returns non-zero, or NULL; lock held during the scan,
 * so the same no-sleep constraint on 'func' applies.
 */
146 typedef int (*msm_queue_find_func)(void *d1, void *d2);
147 #define msm_queue_find(queue, type, member, func, data) ({\
148 unsigned long flags; \
149 struct msm_queue_head *__q = (queue); \
151 typeof(node) __ret = NULL; \
152 msm_queue_find_func __f = (func); \
153 spin_lock_irqsave(&__q->lock, flags); \
154 if (!list_empty(&__q->list)) { \
155 list_for_each_entry(node, &__q->list, member) \
156 if ((__f) && __f(node, data)) { \
161 spin_unlock_irqrestore(&__q->lock, flags); \
/* Initialise a queue head: empty list plus a fresh spinlock. */
165 static void msm_init_queue(struct msm_queue_head *qhead)
170 INIT_LIST_HEAD(&qhead->list);
171 spin_lock_init(&qhead->lock);
/*
 * Append 'entry' to 'qhead' under the queue lock, tracking the historical
 * maximum queue length in qhead->max (statistics only).
 */
176 static void msm_enqueue(struct msm_queue_head *qhead,
177 struct list_head *entry)
181 spin_lock_irqsave(&qhead->lock, flags);
183 if (qhead->len > qhead->max)
184 qhead->max = qhead->len;
185 list_add_tail(entry, &qhead->list);
186 spin_unlock_irqrestore(&qhead->lock, flags);
/* Copy the core v4l2 subdev file_operations into a caller-owned struct. */
189 void msm_cam_copy_v4l2_subdev_fops(struct v4l2_file_operations *d1)
191 *d1 = v4l2_subdev_fops;
193 EXPORT_SYMBOL(msm_cam_copy_v4l2_subdev_fops);
/* Return a pointer to the core v4l2 subdev fops (used for devnode setup). */
195 static const struct v4l2_file_operations *msm_cam_get_v4l2_subdev_fops_ptr(
198 return &v4l2_subdev_fops;
201 /* index = session id */
/* msm_queue_find() predicate: match a session by session_id. */
202 static inline int __msm_queue_find_session(void *d1, void *d2)
204 struct msm_session *session = d1;
206 return (session->session_id == *(unsigned int *)d2) ? 1 : 0;
/* msm_queue_find() predicate: match a stream by stream_id. */
209 static inline int __msm_queue_find_stream(void *d1, void *d2)
211 struct msm_stream *stream = d1;
213 return (stream->stream_id == *(unsigned int *)d2) ? 1 : 0;
/* msm_queue_find() predicate: match a command-ack queue by stream_id. */
216 static inline int __msm_queue_find_command_ack_q(void *d1, void *d2)
218 struct msm_command_ack *ack = d1;
220 return (ack->stream_id == *(unsigned int *)d2) ? 1 : 0;
/* Register a CPU DMA latency PM QoS request with the default value. */
223 static void msm_pm_qos_add_request(void)
225 pr_info("%s: add request", __func__);
226 pm_qos_add_request(&msm_v4l2_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
227 PM_QOS_DEFAULT_VALUE);
/* Remove the request added by msm_pm_qos_add_request(). */
230 static void msm_pm_qos_remove_request(void)
232 pr_info("%s: remove request", __func__);
233 pm_qos_remove_request(&msm_v4l2_pm_qos_request);
/* Update the camera PM QoS request with a new latency value 'val'. */
236 void msm_pm_qos_update_request(int val)
238 pr_info("%s: update request %d", __func__, val);
239 pm_qos_update_request(&msm_v4l2_pm_qos_request, val);
/* Look up a session by id in the global session queue; WARNs when absent. */
242 struct msm_session *msm_session_find(unsigned int session_id)
244 struct msm_session *session;
246 session = msm_queue_find(msm_session_q, struct msm_session,
247 list, __msm_queue_find_session, &session_id);
248 if (WARN_ON(!session))
252 EXPORT_SYMBOL(msm_session_find);
/*
 * Allocate a stream object for (session_id, stream_id), attach the given
 * vb2 queue (assignment not visible in this dump) and link the stream onto
 * the session's stream queue.
 */
254 int msm_create_stream(unsigned int session_id,
255 unsigned int stream_id, struct vb2_queue *q)
257 struct msm_session *session;
258 struct msm_stream *stream;
260 session = msm_queue_find(msm_session_q, struct msm_session,
261 list, __msm_queue_find_session, &session_id);
265 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
269 stream->stream_id = stream_id;
271 spin_lock_init(&stream->stream_lock);
272 msm_enqueue(&session->stream_q, &stream->list);
273 session->stream_q.len++;
275 INIT_LIST_HEAD(&stream->queued_list);
279 EXPORT_SYMBOL(msm_create_stream);
/*
 * Unlink and delete a stream from its session. Holds the session's
 * stream_rwlock for write to exclude concurrent stream lookups, and bails
 * out while vb2 still reports the stream as in use.
 */
281 void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
283 struct msm_session *session = NULL;
284 struct msm_stream *stream = NULL;
288 session = msm_queue_find(msm_session_q, struct msm_session,
289 list, __msm_queue_find_session, &session_id);
297 pr_err("%s : not able to delete stream %d\n",
302 write_lock(&session->stream_rwlock);
304 stream = msm_queue_find(&session->stream_q, struct msm_stream,
305 list, __msm_queue_find_stream, &stream_id);
308 write_unlock(&session->stream_rwlock);
/* state value 1 presumably means "safe to delete" — verify in msm_vb2 */
312 if (msm_vb2_get_stream_state(stream) != 1) {
313 write_unlock(&session->stream_rwlock);
317 spin_lock_irqsave(&(session->stream_q.lock), flags);
318 list_del_init(&stream->list);
319 session->stream_q.len--;
322 spin_unlock_irqrestore(&(session->stream_q.lock), flags);
323 write_unlock(&session->stream_rwlock);
328 EXPORT_SYMBOL(msm_delete_stream);
/* video_device release callback for subdev device nodes created below. */
330 static void msm_sd_unregister_subdev(struct video_device *vdev)
332 struct v4l2_subdev *sd = video_get_drvdata(vdev);
/*
 * Register 'sd' with the msm v4l2 device and, when the subdev asks for a
 * device node (V4L2_SUBDEV_FL_HAS_DEVNODE), allocate and register a
 * video_device for it as well.
 */
338 static inline int __msm_sd_register_subdev(struct v4l2_subdev *sd)
341 struct video_device *vdev;
343 if (!msm_v4l2_dev || !sd || !sd->name[0])
346 rc = v4l2_device_register_subdev(msm_v4l2_dev, sd);
350 /* Register a device node for every subdev marked with the
351  * V4L2_SUBDEV_FL_HAS_DEVNODE flag.
 */
353 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
356 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
362 video_set_drvdata(vdev, sd);
363 strlcpy(vdev->name, sd->name, sizeof(vdev->name));
364 vdev->v4l2_dev = msm_v4l2_dev;
365 vdev->fops = msm_cam_get_v4l2_subdev_fops_ptr();
366 vdev->release = msm_sd_unregister_subdev;
367 rc = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
374 #if defined(CONFIG_MEDIA_CONTROLLER)
375 sd->entity.info.dev.major = VIDEO_MAJOR;
376 sd->entity.info.dev.minor = vdev->minor;
377 sd->entity.name = video_device_node_name(vdev);
/* error path: undo the devnode registration */
384 video_unregister_device(sd->devnode);
/*
 * Insert 'msm_subdev' into 'sd_list' keeping the list ordered by
 * close_seq (ascending), so subdevs shut down in a deterministic order.
 */
388 static void msm_add_sd_in_position(struct msm_sd_subdev *msm_subdev,
389 struct list_head *sd_list)
391 struct msm_sd_subdev *temp_sd;
393 list_for_each_entry(temp_sd, sd_list, list) {
/* already linked — nothing to do */
394 if (temp_sd == msm_subdev) {
397 if (msm_subdev->close_seq < temp_sd->close_seq) {
398 list_add_tail(&msm_subdev->list, &temp_sd->list);
/* largest close_seq so far: append at the tail */
402 list_add_tail(&msm_subdev->list, sd_list);
/* Public entry: order the subdev in ordered_sd_list, then register it. */
405 int msm_sd_register(struct msm_sd_subdev *msm_subdev)
407 if (WARN_ON(!msm_subdev))
410 if (WARN_ON(!msm_v4l2_dev) || WARN_ON(!msm_v4l2_dev->dev))
413 msm_add_sd_in_position(msm_subdev, &ordered_sd_list);
414 return __msm_sd_register_subdev(&msm_subdev->sd);
416 EXPORT_SYMBOL(msm_sd_register);
/* Unregister a previously registered msm subdev. */
418 int msm_sd_unregister(struct msm_sd_subdev *msm_subdev)
420 if (WARN_ON(!msm_subdev))
423 v4l2_device_unregister_subdev(&msm_subdev->sd);
426 EXPORT_SYMBOL(msm_sd_unregister);
/* Find a registered subdev by exact name, under the v4l2 device lock. */
428 static struct v4l2_subdev *msm_sd_find(const char *name)
431 struct v4l2_subdev *subdev = NULL;
432 struct v4l2_subdev *subdev_out = NULL;
434 spin_lock_irqsave(&msm_v4l2_dev->lock, flags);
435 if (!list_empty(&msm_v4l2_dev->subdevs)) {
436 list_for_each_entry(subdev, &msm_v4l2_dev->subdevs, list)
437 if (!strcmp(name, subdev->name)) {
442 spin_unlock_irqrestore(&msm_v4l2_dev->lock, flags);
/*
 * Create a session object for 'session_id', initialise its queues and
 * locks, and link it onto the global session queue. Fails when a session
 * with the same id already exists.
 */
447 int msm_create_session(unsigned int session_id, struct video_device *vdev)
449 struct msm_session *session = NULL;
451 if (!msm_session_q) {
452 pr_err("%s : session queue not available Line %d\n",
457 session = msm_queue_find(msm_session_q, struct msm_session,
458 list, __msm_queue_find_session, &session_id);
460 pr_err("%s: Session exist session_id=%d\n",
461 __func__, session_id);
465 session = kzalloc(sizeof(*session), GFP_KERNEL);
469 session->session_id = session_id;
470 session->event_q.vdev = vdev;
471 msm_init_queue(&session->command_ack_q);
472 msm_init_queue(&session->stream_q);
473 msm_enqueue(msm_session_q, &session->list);
474 mutex_init(&session->lock);
475 mutex_init(&session->lock_q);
476 mutex_init(&session->close_lock);
477 rwlock_init(&session->stream_rwlock);
480 EXPORT_SYMBOL(msm_create_session);
/*
 * Allocate a command-ack queue for (session_id, stream_id). Userspace
 * acks of posted events are queued here, and waiters are woken via the
 * wait_complete completion.
 */
482 int msm_create_command_ack_q(unsigned int session_id, unsigned int stream_id)
484 struct msm_session *session;
485 struct msm_command_ack *cmd_ack;
487 if (!msm_session_q) {
488 pr_err("%s : Session queue not available Line %d\n",
493 session = msm_queue_find(msm_session_q, struct msm_session,
494 list, __msm_queue_find_session, &session_id);
496 pr_err("%s : Session not found Line %d\n",
500 mutex_lock(&session->lock);
501 cmd_ack = kzalloc(sizeof(*cmd_ack), GFP_KERNEL);
503 mutex_unlock(&session->lock);
504 pr_err("%s : memory not available Line %d\n",
509 msm_init_queue(&cmd_ack->command_q);
510 INIT_LIST_HEAD(&cmd_ack->list);
511 init_completion(&cmd_ack->wait_complete);
512 cmd_ack->stream_id = stream_id;
514 msm_enqueue(&session->command_ack_q, &cmd_ack->list);
515 session->command_ack_q.len++;
516 mutex_unlock(&session->lock);
519 EXPORT_SYMBOL(msm_create_command_ack_q);
/*
 * Drain and unlink the command-ack queue for (session_id, stream_id),
 * serialised via session->lock plus the command_ack_q spinlock.
 */
521 void msm_delete_command_ack_q(unsigned int session_id, unsigned int stream_id)
523 struct msm_session *session;
524 struct msm_command_ack *cmd_ack;
527 session = msm_queue_find(msm_session_q, struct msm_session,
528 list, __msm_queue_find_session, &session_id);
531 mutex_lock(&session->lock);
533 cmd_ack = msm_queue_find(&session->command_ack_q,
534 struct msm_command_ack, list, __msm_queue_find_command_ack_q,
537 mutex_unlock(&session->lock);
541 msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
543 spin_lock_irqsave(&(session->command_ack_q.lock), flags);
544 list_del_init(&cmd_ack->list);
546 session->command_ack_q.len--;
547 spin_unlock_irqrestore(&(session->command_ack_q.lock), flags);
548 mutex_unlock(&session->lock);
550 EXPORT_SYMBOL(msm_delete_command_ack_q);
/*
 * Per-subdev shutdown helper: send the subdev the MSM_SD_SHUTDOWN ioctl
 * and then power it down.
 */
552 static inline int __msm_sd_close_subdevs(struct msm_sd_subdev *msm_sd,
553 struct msm_sd_close_ioctl *sd_close)
555 struct v4l2_subdev *sd;
558 pr_debug("%s: Shutting down subdev %s", __func__, sd->name);
560 v4l2_subdev_call(sd, core, ioctl, MSM_SD_SHUTDOWN, sd_close);
561 v4l2_subdev_call(sd, core, s_power, 0);
/*
 * Notify one subdev of an SOF freeze or recovery; the condition selecting
 * between the two ioctls is not visible in this dump — verify against the
 * full source.
 */
566 static inline int __msm_sd_notify_freeze_subdevs(struct msm_sd_subdev *msm_sd,
569 struct v4l2_subdev *sd;
574 v4l2_subdev_call(sd, core, ioctl, MSM_SD_NOTIFY_FREEZE, NULL);
576 v4l2_subdev_call(sd, core, ioctl, MSM_SD_UNNOTIFY_FREEZE, NULL);
/* Traverse callback: forcibly reset a stream's queued-buffer list. */
581 static inline int __msm_destroy_session_streams(void *d1, void *d2)
583 struct msm_stream *stream = d1;
586 pr_err("%s: Error: Destroyed list is not empty\n", __func__);
587 spin_lock_irqsave(&stream->stream_lock, flags);
588 INIT_LIST_HEAD(&stream->queued_list);
589 spin_unlock_irqrestore(&stream->stream_lock, flags);
/* Reset every stream of 'session', then drain the stream queue itself. */
593 static void msm_destroy_session_streams(struct msm_session *session)
599 msm_queue_traverse_action(&session->stream_q, struct msm_stream, list,
600 __msm_destroy_session_streams, NULL);
602 msm_queue_drain(&session->stream_q, struct msm_stream, list);
/* Traverse callback: drain one command-ack queue's pending commands. */
605 static inline int __msm_remove_session_cmd_ack_q(void *d1, void *d2)
607 struct msm_command_ack *cmd_ack = d1;
/* NOTE(review): "&cmd_ack->command_q" is an address-of, never NULL */
609 if (!(&cmd_ack->command_q))
612 msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
/* Drain all command-ack queues of a session under session->lock. */
617 static void msm_remove_session_cmd_ack_q(struct msm_session *session)
/* NOTE(review): "&session->command_ack_q" is likewise always non-NULL */
619 if ((!session) || !(&session->command_ack_q))
622 mutex_lock(&session->lock);
623 /* to ensure error handling purpose, it needs to detach all subdevs
624  * which are being connected to streams
 */
626 msm_queue_traverse_action(&session->command_ack_q,
627 struct msm_command_ack, list,
628 __msm_remove_session_cmd_ack_q, NULL);
630 msm_queue_drain(&session->command_ack_q, struct msm_command_ack, list);
632 mutex_unlock(&session->lock);
/*
 * Tear down a session: destroy its streams and command-ack queues, unlink
 * it from the global session queue, then ask the "msm_buf_mngr" subdev to
 * release buffer-manager resources for this session id.
 */
635 int msm_destroy_session(unsigned int session_id)
637 struct msm_session *session;
638 struct v4l2_subdev *buf_mgr_subdev;
639 struct msm_sd_close_ioctl session_info;
641 session = msm_queue_find(msm_session_q, struct msm_session,
642 list, __msm_queue_find_session, &session_id);
646 msm_destroy_session_streams(session);
647 msm_remove_session_cmd_ack_q(session);
648 mutex_destroy(&session->lock);
649 mutex_destroy(&session->lock_q);
650 mutex_destroy(&session->close_lock);
651 msm_delete_entry(msm_session_q, struct msm_session,
653 buf_mgr_subdev = msm_sd_find("msm_buf_mngr");
654 if (buf_mgr_subdev) {
655 session_info.session = session_id;
656 session_info.stream = 0;
657 v4l2_subdev_call(buf_mgr_subdev, core, ioctl,
658 MSM_SD_SHUTDOWN, &session_info);
660 pr_err("%s: Buff manger device node is NULL\n", __func__);
665 EXPORT_SYMBOL(msm_destroy_session);
/*
 * Traverse callback: queue a MSM_CAMERA_PRIV_SHUTDOWN event on a session's
 * event device so userspace (HAL) learns the daemon is going away.
 */
667 static int __msm_close_destry_session_notify_apps(void *d1, void *d2)
669 struct v4l2_event event;
670 struct msm_v4l2_event_data *event_data =
671 (struct msm_v4l2_event_data *)&event.u.data[0];
672 struct msm_session *session = d1;
674 event.type = MSM_CAMERA_V4L2_EVENT_TYPE;
675 event.id = MSM_CAMERA_MSM_NOTIFY;
676 event_data->command = MSM_CAMERA_PRIV_SHUTDOWN;
678 v4l2_event_queue(session->event_q.vdev, &event);
/*
 * Per-stream helper (used as a msm_queue_find() predicate below purely to
 * visit each stream): complete the stream's command-ack waiter so a thread
 * blocked in msm_post_event() can exit during close.
 */
683 static int __msm_wakeup_all_cmdack_session_stream(void *d1, void *d2)
685 struct msm_stream *stream = d1;
686 struct msm_session *session = d2;
687 struct msm_command_ack *cmd_ack = NULL;
688 unsigned long spin_flags = 0;
690 cmd_ack = msm_queue_find(&session->command_ack_q,
691 struct msm_command_ack, list,
692 __msm_queue_find_command_ack_q,
695 spin_lock_irqsave(&(session->command_ack_q.lock),
697 complete(&cmd_ack->wait_complete);
698 spin_unlock_irqrestore(&(session->command_ack_q.lock),
/* Wake the command-ack waiters of every stream in one session. */
704 static int __msm_close_wakeup_all_cmdack_session(void *d1, void *d2)
706 struct msm_stream *stream = NULL;
707 struct msm_session *session = d1;
709 stream = msm_queue_find(&session->stream_q, struct msm_stream,
710 list, __msm_wakeup_all_cmdack_session_stream, d1);
/*
 * vidioc_default handler for the config node: private ioctls issued by the
 * userspace camera daemon (event notify, command ack, SOF debug notify,
 * error notify, daemon-disabled announcement).
 */
714 static long msm_private_ioctl(struct file *file, void *fh,
715 bool valid_prio, unsigned int cmd, void *arg)
718 struct msm_v4l2_event_data *event_data = arg;
719 struct v4l2_event event;
720 struct msm_session *session;
721 unsigned int session_id;
722 unsigned int stream_id;
723 unsigned long spin_flags = 0;
724 struct msm_sd_subdev *msm_sd;
/* daemon announces it is not running: flip the global flag and return */
726 if (cmd == MSM_CAM_V4L2_IOCTL_DAEMON_DISABLED) {
727 is_daemon_status = false;
734 memset(&event, 0, sizeof(struct v4l2_event));
735 session_id = event_data->session_id;
736 stream_id = event_data->stream_id;
738 session = msm_queue_find(msm_session_q, struct msm_session,
739 list, __msm_queue_find_session, &session_id);
/* forward a daemon-generated v4l2 event to the session's event queue */
745 case MSM_CAM_V4L2_IOCTL_NOTIFY: {
746 if (WARN_ON(!session->event_q.vdev)) {
750 event.type = event_data->v4l2_event_type;
751 event.id = event_data->v4l2_event_id;
752 memcpy(&event.u.data, event_data,
753 sizeof(struct msm_v4l2_event_data));
754 v4l2_event_queue(session->event_q.vdev,
/* daemon acknowledges a command previously posted via msm_post_event() */
759 case MSM_CAM_V4L2_IOCTL_CMD_ACK: {
760 struct msm_command_ack *cmd_ack;
761 struct msm_command *ret_cmd;
763 ret_cmd = kzalloc(sizeof(*ret_cmd), GFP_KERNEL);
769 cmd_ack = msm_queue_find(&session->command_ack_q,
770 struct msm_command_ack, list,
771 __msm_queue_find_command_ack_q,
773 if (WARN_ON(!cmd_ack)) {
779 spin_lock_irqsave(&(session->command_ack_q.lock),
781 event.type = event_data->v4l2_event_type;
782 event.id = event_data->v4l2_event_id;
783 memcpy(&event.u.data, event_data,
784 sizeof(struct msm_v4l2_event_data));
785 memcpy(&ret_cmd->event, &event, sizeof(struct v4l2_event));
786 msm_enqueue(&cmd_ack->command_q, &ret_cmd->list);
/* wake the waiter blocked in msm_post_event() */
787 complete(&cmd_ack->wait_complete);
788 spin_unlock_irqrestore(&(session->command_ack_q.lock),
/* broadcast SOF freeze/recover notification to all ordered subdevs */
793 case MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG: {
794 if (event_data->status) {
795 pr_err("%s:Notifying subdevs about potential sof freeze\n",
798 pr_err("%s:Notifying subdevs about sof recover\n",
802 if (!list_empty(&msm_v4l2_dev->subdevs)) {
803 list_for_each_entry(msm_sd, &ordered_sd_list, list)
804 __msm_sd_notify_freeze_subdevs(msm_sd,
810 case MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR:
811 /* send v4l2_event to HAL next*/
812 msm_queue_traverse_action(msm_session_q,
813 struct msm_session, list,
814 __msm_close_destry_session_notify_apps, NULL);
/* Standard v4l2 event (un)subscription for the config node. */
825 static int msm_unsubscribe_event(struct v4l2_fh *fh,
826 const struct v4l2_event_subscription *sub)
828 return v4l2_event_unsubscribe(fh, sub);
831 static int msm_subscribe_event(struct v4l2_fh *fh,
832 const struct v4l2_event_subscription *sub)
/* depth of 5 queued events per subscription */
834 return v4l2_event_subscribe(fh, sub, 5, NULL);
837 static const struct v4l2_ioctl_ops g_msm_ioctl_ops = {
838 .vidioc_subscribe_event = msm_subscribe_event,
839 .vidioc_unsubscribe_event = msm_unsubscribe_event,
840 .vidioc_default = msm_private_ioctl,
/* poll() for the config node: readable when a v4l2 event is pending. */
843 static unsigned int msm_poll(struct file *f,
844 struct poll_table_struct *pll_table)
847 struct v4l2_fh *eventq = f->private_data;
849 if (WARN_ON(!eventq))
852 poll_wait(f, &eventq->wait, pll_table);
854 if (v4l2_event_pending(eventq))
855 rc = POLLIN | POLLRDNORM;
/* Dump an event's identifying fields to the log for timeout diagnosis. */
860 static void msm_print_event_error(struct v4l2_event *event)
862 struct msm_v4l2_event_data *event_data =
863 (struct msm_v4l2_event_data *)&event->u.data[0];
865 pr_err("Evt_type=%x Evt_id=%d Evt_cmd=%x\n", event->type,
866 event->id, event_data->command);
867 pr_err("Evt_session_id=%d Evt_stream_id=%d Evt_arg=%d\n",
868 event_data->session_id, event_data->stream_id,
869 event_data->arg_value);
872 /* something seriously wrong if msm_close is triggered
873  * !!! user space imaging server is shutdown !!!
 */
/*
 * Post 'event' to the userspace imaging daemon through the config node
 * and, when timeout >= 0, block up to 'timeout' ms for the daemon's
 * matching ack (delivered via MSM_CAM_V4L2_IOCTL_CMD_ACK). Holds
 * session->lock across the whole post/wait/dequeue sequence.
 */
875 int msm_post_event(struct v4l2_event *event, int timeout)
878 struct video_device *vdev;
879 struct msm_session *session;
880 struct msm_v4l2_event_data *event_data =
881 (struct msm_v4l2_event_data *)&event->u.data[0];
882 struct msm_command_ack *cmd_ack;
883 struct msm_command *cmd;
884 int session_id, stream_id;
885 unsigned long flags = 0;
887 session_id = event_data->session_id;
888 stream_id = event_data->stream_id;
/* fail fast when the daemon's event queue is gone (daemon closed) */
890 spin_lock_irqsave(&msm_eventq_lock, flags);
892 spin_unlock_irqrestore(&msm_eventq_lock, flags);
893 pr_err("%s : msm event queue not available Line %d\n",
897 spin_unlock_irqrestore(&msm_eventq_lock, flags);
899 vdev = msm_eventq->vdev;
901 /* send to imaging server and wait for ACK */
902 session = msm_queue_find(msm_session_q, struct msm_session,
903 list, __msm_queue_find_session, &session_id);
904 if (WARN_ON(!session)) {
905 pr_err("%s : session not found Line %d\n",
909 mutex_lock(&session->lock);
910 cmd_ack = msm_queue_find(&session->command_ack_q,
911 struct msm_command_ack, list,
912 __msm_queue_find_command_ack_q, &stream_id);
913 if (WARN_ON(!cmd_ack)) {
914 mutex_unlock(&session->lock);
915 pr_err("%s : cmd_ack not found Line %d\n",
920 /* re-init wait_complete before posting, so a stale complete is ignored */
921 reinit_completion(&cmd_ack->wait_complete);
923 v4l2_event_queue(vdev, event);
/* negative timeout: fire-and-forget, no ack expected */
926 mutex_unlock(&session->lock);
927 pr_debug("%s : timeout cannot be negative Line %d\n",
932 /* should wait on session based condition */
933 rc = wait_for_completion_timeout(&cmd_ack->wait_complete,
934 msecs_to_jiffies(timeout));
/* empty ack queue after wakeup: genuine timeout or spurious complete */
937 if (list_empty_careful(&cmd_ack->command_q.list)) {
939 pr_err("%s: Timed out\n", __func__);
940 msm_print_event_error(event);
941 mutex_unlock(&session->lock);
944 pr_err("%s: Error: No timeout but list empty!", __func__);
945 msm_print_event_error(event);
946 mutex_unlock(&session->lock);
950 cmd = msm_dequeue(&cmd_ack->command_q,
951 struct msm_command, list);
953 mutex_unlock(&session->lock);
954 pr_err("%s : cmd dequeue failed Line %d\n",
959 event_data = (struct msm_v4l2_event_data *)cmd->event.u.data;
961 /* compare cmd_ret and event */
962 if (WARN_ON(event->type != cmd->event.type) ||
963 WARN_ON(event->id != cmd->event.id)) {
964 pr_err("%s : Either event type or id didnot match Line %d\n",
966 pr_err("%s : event->type %d event->id %d\n", __func__,
967 event->type, event->id);
968 pr_err("%s : cmd->event.type %d cmd->event.id %d\n", __func__,
969 cmd->event.type, cmd->event.id);
976 mutex_unlock(&session->lock);
979 EXPORT_SYMBOL(msm_post_event);
/*
 * Release handler for the config node. The daemon exiting means imaging
 * is shutting down: stop all subdevs, notify HAL sessions, wake any
 * blocked ack waiters, then drop the event queue and the cached pid.
 */
981 static int msm_close(struct file *filep)
985 struct msm_video_device *pvdev = video_drvdata(filep);
986 struct msm_sd_close_ioctl sd_close;
987 struct msm_sd_subdev *msm_sd;
989 /* stop all hardware blocks immediately */
990 if (!list_empty(&msm_v4l2_dev->subdevs))
991 list_for_each_entry(msm_sd, &ordered_sd_list, list)
992 __msm_sd_close_subdevs(msm_sd, &sd_close);
994 /* remove msm_v4l2_pm_qos_request */
995 msm_pm_qos_remove_request();
997 /* send v4l2_event to HAL next*/
998 msm_queue_traverse_action(msm_session_q, struct msm_session, list,
999 __msm_close_destry_session_notify_apps, NULL);
/* release anything blocked in msm_post_event() waiting for an ack */
1001 msm_queue_traverse_action(msm_session_q, struct msm_session, list,
1002 __msm_close_wakeup_all_cmdack_session, NULL);
1004 spin_lock_irqsave(&msm_eventq_lock, flags);
1006 spin_unlock_irqrestore(&msm_eventq_lock, flags);
1007 v4l2_fh_release(filep);
1009 spin_lock_irqsave(&msm_pid_lock, flags);
1012 spin_unlock_irqrestore(&msm_pid_lock, flags);
/* allow the node to be opened again */
1014 atomic_set(&pvdev->opened, 0);
/*
 * Relink two list nodes so that l2 takes over l1's neighbours; presumably
 * used to swap adjacent entries — callers are not visible in this dump,
 * verify adjacency assumption against the full source.
 */
1019 static inline void msm_list_switch(struct list_head *l1,
1020 struct list_head *l2)
1022 l1->next = l2->next;
1023 l2->prev = l1->prev;
1024 l1->prev->next = l2;
1025 l2->next->prev = l1;
/*
 * Open handler for the config node; only a single open is permitted (the
 * userspace daemon). Caches the opener's pid, creates the v4l2 event
 * queue and registers the PM QoS request.
 */
1030 static int msm_open(struct file *filep)
1033 unsigned long flags;
1034 struct msm_video_device *pvdev = video_drvdata(filep);
1036 if (WARN_ON(!pvdev))
1039 /* !!! only ONE open is allowed !!! */
1040 if (atomic_read(&pvdev->opened))
1043 atomic_set(&pvdev->opened, 1);
1045 spin_lock_irqsave(&msm_pid_lock, flags);
1046 msm_pid = get_pid(task_pid(current));
1047 spin_unlock_irqrestore(&msm_pid_lock, flags);
1049 /* create event queue */
1050 rc = v4l2_fh_open(filep);
1054 spin_lock_irqsave(&msm_eventq_lock, flags);
1055 msm_eventq = filep->private_data;
1056 spin_unlock_irqrestore(&msm_eventq_lock, flags);
1058 /* register msm_v4l2_pm_qos_request */
1059 msm_pm_qos_add_request();
/* File operations for the msm-config video node. */
1064 static struct v4l2_file_operations msm_fops = {
1065 .owner = THIS_MODULE,
1068 .release = msm_close,
1069 .unlocked_ioctl = video_ioctl2,
1070 #ifdef CONFIG_COMPAT
1071 .compat_ioctl32 = video_ioctl2,
/* Look up a session by id; returns ERR_PTR(-EINVAL) when not found. */
1075 struct msm_session *msm_get_session(unsigned int session_id)
1077 struct msm_session *session;
1079 session = msm_queue_find(msm_session_q, struct msm_session,
1080 list, __msm_queue_find_session, &session_id);
1082 return ERR_PTR(-EINVAL);
1086 EXPORT_SYMBOL(msm_get_session);
/* Look up a stream by id within 'session'; ERR_PTR(-EINVAL) when absent. */
1089 struct msm_stream *msm_get_stream(struct msm_session *session,
1090 unsigned int stream_id)
1092 struct msm_stream *stream;
1094 stream = msm_queue_find(&session->stream_q, struct msm_stream,
1095 list, __msm_queue_find_stream, &stream_id);
1098 return ERR_PTR(-EINVAL);
1102 EXPORT_SYMBOL(msm_get_stream);
/* Resolve (session_id, stream_id) to the stream's vb2 queue pointer. */
1104 struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id,
1105 unsigned int stream_id)
1107 struct msm_session *session;
1108 struct msm_stream *stream;
1110 session = msm_queue_find(msm_session_q, struct msm_session,
1111 list, __msm_queue_find_session, &session_id);
1115 stream = msm_queue_find(&session->stream_q, struct msm_stream,
1116 list, __msm_queue_find_stream, &stream_id);
1120 return stream->vb2_q;
1122 EXPORT_SYMBOL(msm_get_stream_vb2q);
/*
 * Reverse lookup: walk every session's stream list (session-queue lock
 * nested outside each per-session stream-queue lock) and return the
 * stream whose vb2 queue is 'q'.
 */
1124 struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q)
1126 struct msm_session *session;
1127 struct msm_stream *stream;
1128 unsigned long flags1;
1129 unsigned long flags2;
1131 spin_lock_irqsave(&msm_session_q->lock, flags1);
1132 list_for_each_entry(session, &(msm_session_q->list), list) {
1133 spin_lock_irqsave(&(session->stream_q.lock), flags2);
1134 list_for_each_entry(
1135 stream, &(session->stream_q.list), list) {
1136 if (stream->vb2_q == q) {
1137 spin_unlock_irqrestore
1138 (&(session->stream_q.lock), flags2);
1139 spin_unlock_irqrestore
1140 (&msm_session_q->lock, flags1);
1144 spin_unlock_irqrestore(&(session->stream_q.lock), flags2);
1146 spin_unlock_irqrestore(&msm_session_q->lock, flags1);
1149 EXPORT_SYMBOL(msm_get_stream_from_vb2q);
/*
 * Reverse lookup: return the session owning the stream whose vb2 queue is
 * 'q'. Same traversal/locking pattern as msm_get_stream_from_vb2q().
 */
1151 struct msm_session *msm_get_session_from_vb2q(struct vb2_queue *q)
1153 struct msm_session *session;
1154 struct msm_stream *stream;
1155 unsigned long flags1;
1156 unsigned long flags2;
1158 spin_lock_irqsave(&msm_session_q->lock, flags1);
1159 list_for_each_entry(session, &(msm_session_q->list), list) {
1160 spin_lock_irqsave(&(session->stream_q.lock), flags2);
1161 list_for_each_entry(
1162 stream, &(session->stream_q.list), list) {
1163 if (stream->vb2_q == q) {
1164 spin_unlock_irqrestore
1165 (&(session->stream_q.lock), flags2);
1166 spin_unlock_irqrestore
1167 (&msm_session_q->lock, flags1);
1171 spin_unlock_irqrestore(&(session->stream_q.lock), flags2);
1173 spin_unlock_irqrestore(&msm_session_q->lock, flags1);
1176 EXPORT_SYMBOL(msm_get_session_from_vb2q);
1179 #ifdef CONFIG_COMPAT
/*
 * Copy a 32-bit userspace msm_camera_private_ioctl_arg into the kernel
 * struct, converting the embedded ioctl_ptr with compat_ptr().
 */
1180 long msm_copy_camera_private_ioctl_args(unsigned long arg,
1181 struct msm_camera_private_ioctl_arg *k_ioctl,
1182 void __user **tmp_compat_ioctl_ptr)
1184 struct msm_camera_private_ioctl_arg up_ioctl;
1186 if (WARN_ON(!arg || !k_ioctl || !tmp_compat_ioctl_ptr))
1189 if (copy_from_user(&up_ioctl,
1191 sizeof(struct msm_camera_private_ioctl_arg)))
1194 k_ioctl->id = up_ioctl.id;
1195 k_ioctl->size = up_ioctl.size;
1196 k_ioctl->result = up_ioctl.result;
1197 k_ioctl->reserved = up_ioctl.reserved;
1198 *tmp_compat_ioctl_ptr = compat_ptr(up_ioctl.ioctl_ptr);
1202 EXPORT_SYMBOL(msm_copy_camera_private_ioctl_args);
/*
 * v4l2_device notify hook: lets subdevs look each other up by name
 * (GET_SD / PUT_SD) and request vb2 callbacks (REQ_CB) via the msm core.
 */
1205 static void msm_sd_notify(struct v4l2_subdev *sd,
1206 unsigned int notification, void *arg)
1209 struct v4l2_subdev *subdev = NULL;
1211 if (WARN_ON(!sd) || WARN_ON(!arg))
1214 /* Check if subdev exists before processing*/
1215 if (!msm_sd_find(sd->name))
1218 switch (notification) {
1219 case MSM_SD_NOTIFY_GET_SD: {
1220 struct msm_sd_req_sd *get_sd = arg;
1222 get_sd->subdev = msm_sd_find(get_sd->name);
1223 /* TODO: might need to add ref count on ret_sd */
1227 case MSM_SD_NOTIFY_PUT_SD: {
1228 struct msm_sd_req_sd *put_sd = arg;
1230 subdev = msm_sd_find(put_sd->name);
1234 case MSM_SD_NOTIFY_REQ_CB: {
1235 struct msm_sd_req_vb2_q *req_sd = arg;
1237 rc = msm_vb2_request_cb(req_sd);
/*
 * debugfs write handler: parse a decimal sequence number used to
 * correlate kernel and userspace logs.
 * NOTE(review): copy_from_user() always reads sizeof(lbuf) bytes and
 * ignores 'count', so a short user buffer can be over-read and the last
 * byte of lbuf is not guaranteed NUL-terminated; should copy
 * min(count, sizeof(lbuf) - 1) instead.
 */
1248 static ssize_t write_logsync(struct file *file, const char __user *buf,
1249 size_t count, loff_t *ppos)
1251 char lbuf[LOGSYNC_PACKET_SIZE] = {0};
1252 uint64_t seq_num = 0;
1254 if (copy_from_user(lbuf, buf, sizeof(lbuf)))
1257 if (kstrtoull(lbuf, 0, &seq_num) < 0)
1258 pr_err("LOGSYNC (Kernel): Bad or malformed sequence number\n");
1260 pr_debug("LOGSYNC (Kernel): seq_num = %llu\n", seq_num);
/* write-only debugfs file exposing the logsync handler above */
1266 static const struct file_operations logsync_fops = {
1267 .write = write_logsync,
/*
 * Platform probe: allocate the msm v4l2 device and the "msm-config" video
 * node, wire up media-controller entities, create the global session
 * queue, expose the logsync debugfs file and init the AHB clocks.
 */
1270 static int msm_probe(struct platform_device *pdev)
1272 struct msm_video_device *pvdev = NULL;
1273 static struct dentry *cam_debugfs_root;
1276 msm_v4l2_dev = kzalloc(sizeof(*msm_v4l2_dev),
1278 if (!msm_v4l2_dev) {
1283 pvdev = kzalloc(sizeof(struct msm_video_device),
1290 pvdev->vdev = video_device_alloc();
1296 #if defined(CONFIG_MEDIA_CONTROLLER)
1297 msm_v4l2_dev->mdev = kzalloc(sizeof(struct media_device),
1299 if (!msm_v4l2_dev->mdev) {
1303 strlcpy(msm_v4l2_dev->mdev->model, MSM_CONFIGURATION_NAME,
1304 sizeof(msm_v4l2_dev->mdev->model));
1305 msm_v4l2_dev->mdev->dev = &(pdev->dev);
1307 rc = media_device_register(msm_v4l2_dev->mdev);
1308 if (WARN_ON(rc < 0))
/* NOTE(review): "rc == media_entity_init(...)" compares instead of
 * assigning; looks like it should be "rc = ..." plus a "rc < 0" check —
 * verify against the full source. */
1311 if (WARN_ON((rc == media_entity_init(&pvdev->vdev->entity,
1315 pvdev->vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
1316 pvdev->vdev->entity.group_id = QCAMERA_VNODE_GROUP_ID;
1319 msm_v4l2_dev->notify = msm_sd_notify;
1321 pvdev->vdev->v4l2_dev = msm_v4l2_dev;
1323 rc = v4l2_device_register(&(pdev->dev), pvdev->vdev->v4l2_dev);
1324 if (WARN_ON(rc < 0))
1327 strlcpy(pvdev->vdev->name, "msm-config", sizeof(pvdev->vdev->name));
1328 pvdev->vdev->release = video_device_release;
1329 pvdev->vdev->fops = &msm_fops;
1330 pvdev->vdev->ioctl_ops = &g_msm_ioctl_ops;
1331 pvdev->vdev->minor = -1;
1332 pvdev->vdev->vfl_type = VFL_TYPE_GRABBER;
1333 rc = video_register_device(pvdev->vdev,
1334 VFL_TYPE_GRABBER, -1);
1335 if (WARN_ON(rc < 0))
1338 #if defined(CONFIG_MEDIA_CONTROLLER)
1339 /* FIXME: How to get rid of this messy? */
1340 pvdev->vdev->entity.name = video_device_node_name(pvdev->vdev);
1343 atomic_set(&pvdev->opened, 0);
1344 video_set_drvdata(pvdev->vdev, pvdev);
1346 msm_session_q = kzalloc(sizeof(*msm_session_q), GFP_KERNEL);
1350 msm_init_queue(msm_session_q);
1351 spin_lock_init(&msm_eventq_lock);
1352 spin_lock_init(&msm_pid_lock);
1353 INIT_LIST_HEAD(&ordered_sd_list);
/* debugfs failures are non-fatal: logsync is a debugging aid only */
1355 cam_debugfs_root = debugfs_create_dir(MSM_CAM_LOGSYNC_FILE_BASEDIR,
1357 if (!cam_debugfs_root) {
1358 pr_warn("NON-FATAL: failed to create logsync base directory\n");
1360 if (!debugfs_create_file(MSM_CAM_LOGSYNC_FILE_NAME,
1365 pr_warn("NON-FATAL: failed to create logsync debugfs file\n");
1368 rc = cam_ahb_clk_init(pdev);
1370 pr_err("%s: failed to register ahb clocks\n", __func__);
/* error unwind: release in reverse order of acquisition */
1377 v4l2_device_unregister(pvdev->vdev->v4l2_dev);
1379 #if defined(CONFIG_MEDIA_CONTROLLER)
1380 media_entity_cleanup(&pvdev->vdev->entity);
1382 media_device_unregister(msm_v4l2_dev->mdev);
1384 kzfree(msm_v4l2_dev->mdev);
1387 video_device_release(pvdev->vdev);
1391 kzfree(msm_v4l2_dev);
/* Device-tree match table for the msm camera core. */
1396 static const struct of_device_id msm_dt_match[] = {
1397 {.compatible = "qcom,msm-cam"},
1400 MODULE_DEVICE_TABLE(of, msm_dt_match);
1402 static struct platform_driver msm_driver = {
1406 .owner = THIS_MODULE,
1407 .of_match_table = msm_dt_match,
/* Module init/exit: plain platform-driver registration. */
1411 static int __init msm_init(void)
1413 return platform_driver_register(&msm_driver);
1416 static void __exit msm_exit(void)
1418 platform_driver_unregister(&msm_driver);
1422 module_init(msm_init);
1423 module_exit(msm_exit);
1424 MODULE_DESCRIPTION("MSM V4L2 Camera");
1425 MODULE_LICENSE("GPL v2");