/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mutex.h>
#include <media/v4l2-subdev.h>
#include <linux/ratelimit.h>

#include "msm_isp_util.h"
#include "msm_isp_axi_util.h"
#include "msm_isp_stats_util.h"
#include "msm_camera_io_util.h"
#include "cam_smmu_api.h"
#define CREATE_TRACE_POINTS
#include "trace/events/msm_cam.h"
#define MAX_ISP_V4l2_EVENTS 100
#define MAX_ISP_REG_LIST 100
#define MAX_ISP_CMD_NUM 10
#define MAX_ISP_CMD_LEN 4096
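/*
 * These bounds cap user-supplied ioctl payloads: msm_isp_proc_cmd() rejects
 * requests with more than MAX_ISP_CMD_NUM register commands or more than
 * MAX_ISP_CMD_LEN bytes of config data, and the REG_LIST paths stop chasing
 * chained command lists after MAX_ISP_REG_LIST nodes.
 */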
static DEFINE_MUTEX(bandwidth_mgr_mutex);
static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;

static uint64_t msm_isp_cpp_clk_rate;
static struct dump_ping_pong_state dump_data;
static struct dump_ping_pong_state tasklet_data;
static DEFINE_SPINLOCK(dump_irq_lock);
static DEFINE_SPINLOCK(dump_tasklet_lock);

#define VFE40_8974V2_VERSION 0x1001001A
void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
    for (i = 0; i < 4; i++) {
        text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
        if ((text[i] < '0') || (text[i] > 'z')) {
            pr_err("%s: Invalid output format %d (unprintable)\n",
                origin, fourcc_format);
    pr_err("%s: Invalid output format %s\n",
int msm_isp_init_bandwidth_mgr(struct vfe_device *vfe_dev,
    enum msm_isp_hw_client client)
    mutex_lock(&bandwidth_mgr_mutex);
    if (isp_bandwidth_mgr.client_info[client].active) {
        mutex_unlock(&bandwidth_mgr_mutex);
    isp_bandwidth_mgr.client_info[client].active = 1;
    isp_bandwidth_mgr.use_count++;
    if (vfe_dev && !isp_bandwidth_mgr.bus_client) {
        rc = vfe_dev->hw_info->vfe_ops.platform_ops.init_bw_mgr(vfe_dev,
        isp_bandwidth_mgr.update_bw =
            vfe_dev->hw_info->vfe_ops.platform_ops.update_bw;
        isp_bandwidth_mgr.deinit_bw_mgr =
            vfe_dev->hw_info->vfe_ops.platform_ops.deinit_bw_mgr;
        isp_bandwidth_mgr.use_count--;
        isp_bandwidth_mgr.client_info[client].active = 0;
    mutex_unlock(&bandwidth_mgr_mutex);
int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
    uint64_t ab, uint64_t ib)
    mutex_lock(&bandwidth_mgr_mutex);
    if (!isp_bandwidth_mgr.use_count ||
        !isp_bandwidth_mgr.bus_client) {
        pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
            __func__, isp_bandwidth_mgr.use_count,
            isp_bandwidth_mgr.bus_client);
        mutex_unlock(&bandwidth_mgr_mutex);
    isp_bandwidth_mgr.client_info[client].ab = ab;
    isp_bandwidth_mgr.client_info[client].ib = ib;
    rc = isp_bandwidth_mgr.update_bw(&isp_bandwidth_mgr);
    mutex_unlock(&bandwidth_mgr_mutex);
void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
    if (client >= MAX_ISP_CLIENT) {
        pr_err("invalid Client id %d", client);
    mutex_lock(&bandwidth_mgr_mutex);
    memset(&isp_bandwidth_mgr.client_info[client], 0,
        sizeof(struct msm_isp_bandwidth_info));
    if (isp_bandwidth_mgr.use_count) {
        isp_bandwidth_mgr.use_count--;
        if (isp_bandwidth_mgr.use_count) {
            mutex_unlock(&bandwidth_mgr_mutex);
    if (!isp_bandwidth_mgr.bus_client) {
        pr_err("%s:%d error: bus client invalid\n",
        mutex_unlock(&bandwidth_mgr_mutex);
    isp_bandwidth_mgr.deinit_bw_mgr(
    mutex_unlock(&bandwidth_mgr_mutex);
void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
    struct msm_isp_statistics *stats)
    stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
    stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
    stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;

    stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
    stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
    stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;

    stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
    stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
    stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
    stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
    stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
    stats->vfe_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
    stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
void msm_isp_util_update_clk_rate(long clock_rate)
    msm_isp_cpp_clk_rate = clock_rate;
uint32_t msm_isp_get_framedrop_period(
    enum msm_vfe_frame_skip_pattern frame_skip_pattern)
    switch (frame_skip_pattern) {
        return frame_skip_pattern + 1;
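/*
 * The case labels above are elided in this excerpt; for the low skip
 * patterns the enum value maps directly onto the period, e.g. EVERY_2FRAME
 * (assuming the usual enum layout where EVERY_2FRAME == 1) yields a
 * framedrop period of 2: one frame written out of every two.
 */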
void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
    struct vfe_device *vfe_dev)
    do_gettimeofday(&(time_stamp->event_time));
    if (vfe_dev->vt_enable) {
        msm_isp_get_avtimer_ts(time_stamp);
        time_stamp->buf_time.tv_sec = time_stamp->vt_time.tv_sec;
        time_stamp->buf_time.tv_usec = time_stamp->vt_time.tv_usec;
        get_monotonic_boottime(&ts);
        time_stamp->buf_time.tv_sec = ts.tv_sec;
        time_stamp->buf_time.tv_usec = ts.tv_nsec/1000;
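/*
 * Two time bases are in play above: event_time always carries wall-clock
 * time, while buf_time comes from the AVtimer when video-timestamp mode
 * (vt_enable) is set and from the monotonic boot clock otherwise.
 */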
static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
    u32 evt_id = ISP_EVENT_SUBS_MASK_NONE;

    case ISP_EVENT_MASK_INDEX_STATS_NOTIFY:
        evt_id = ISP_EVENT_STATS_NOTIFY;
    case ISP_EVENT_MASK_INDEX_ERROR:
        evt_id = ISP_EVENT_ERROR;
    case ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT:
        evt_id = ISP_EVENT_IOMMU_P_FAULT;
    case ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE:
        evt_id = ISP_EVENT_STREAM_UPDATE_DONE;
    case ISP_EVENT_MASK_INDEX_REG_UPDATE:
        evt_id = ISP_EVENT_REG_UPDATE;
    case ISP_EVENT_MASK_INDEX_SOF:
        evt_id = ISP_EVENT_SOF;
    case ISP_EVENT_MASK_INDEX_BUF_DIVERT:
        evt_id = ISP_EVENT_BUF_DIVERT;
    case ISP_EVENT_MASK_INDEX_BUF_DONE:
        evt_id = ISP_EVENT_BUF_DONE;
    case ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY:
        evt_id = ISP_EVENT_COMP_STATS_NOTIFY;
    case ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE:
        evt_id = ISP_EVENT_FE_READ_DONE;
    case ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH:
        evt_id = ISP_EVENT_PING_PONG_MISMATCH;
    case ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING:
        evt_id = ISP_EVENT_REG_UPDATE_MISSING;
    case ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR:
        evt_id = ISP_EVENT_BUF_FATAL_ERROR;
        evt_id = ISP_EVENT_SUBS_MASK_NONE;
static inline int msm_isp_subscribe_event_mask(struct v4l2_fh *fh,
    struct v4l2_event_subscription *sub, int evt_mask_index,
    u32 evt_id, bool subscribe_flag)
    int rc = 0, i, interface;

    if (evt_mask_index == ISP_EVENT_MASK_INDEX_STATS_NOTIFY) {
        for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
            sub->type = evt_id + i;
                rc = v4l2_event_subscribe(fh, sub,
                    MAX_ISP_V4l2_EVENTS, NULL);
                rc = v4l2_event_unsubscribe(fh, sub);
                pr_err("%s: Subs event_type =0x%x failed\n",
                    __func__, sub->type);
    } else if (evt_mask_index == ISP_EVENT_MASK_INDEX_SOF ||
        evt_mask_index == ISP_EVENT_MASK_INDEX_REG_UPDATE ||
        evt_mask_index == ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE) {
        for (interface = 0; interface < VFE_SRC_MAX; interface++) {
            sub->type = evt_id | interface;
                rc = v4l2_event_subscribe(fh, sub,
                    MAX_ISP_V4l2_EVENTS, NULL);
                rc = v4l2_event_unsubscribe(fh, sub);
                pr_err("%s: Subs event_type =0x%x failed\n",
                    __func__, sub->type);
        rc = v4l2_event_subscribe(fh, sub,
            MAX_ISP_V4l2_EVENTS, NULL);
        rc = v4l2_event_unsubscribe(fh, sub);
        pr_err("%s: Subs event_type =0x%x failed\n",
            __func__, sub->type);
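/*
 * Subscription fan-out, as implemented above: a STATS_NOTIFY bit expands
 * into one v4l2 event type per stats stream (MSM_ISP_STATS_MAX of them),
 * while the SOF, REG_UPDATE and STREAM_UPDATE_DONE bits expand into one
 * event type per input interface (VFE_SRC_MAX); every other bit maps to a
 * single event type.
 */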
static inline int msm_isp_process_event_subscription(struct v4l2_fh *fh,
    struct v4l2_event_subscription *sub, bool subscribe_flag)
    int rc = 0, evt_mask_index = 0;
    u32 evt_mask = sub->type;

    if (evt_mask == ISP_EVENT_SUBS_MASK_NONE) {
        pr_err("%s: Subs event_type is None=0x%x\n",
    for (evt_mask_index = ISP_EVENT_MASK_INDEX_STATS_NOTIFY;
        evt_mask_index <= ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR;
        if (evt_mask & (1<<evt_mask_index)) {
            evt_id = msm_isp_evt_mask_to_isp_event(evt_mask_index);
            rc = msm_isp_subscribe_event_mask(fh, sub,
                evt_mask_index, evt_id, subscribe_flag);
                pr_err("%s: Subs event index:%d failed\n",
                    __func__, evt_mask_index);

int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
    struct v4l2_event_subscription *sub)
    return msm_isp_process_event_subscription(fh, sub, true);

int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
    struct v4l2_event_subscription *sub)
    return msm_isp_process_event_subscription(fh, sub, false);
static int msm_isp_start_fetch_engine(struct vfe_device *vfe_dev,
    struct msm_vfe_fetch_eng_start *fe_cfg = arg;
    /*
     * For offline VFE, the HAL expects the same frame id for the offline
     * output that it requested in do_reprocess.
     */
    vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
    return vfe_dev->hw_info->vfe_ops.core_ops.
        start_fetch_eng(vfe_dev, arg);
static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
    struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
    struct msm_vfe_axi_stream *stream_info = NULL;
    uint32_t wm_reload_mask = 0;
    /*
     * For offline VFE, the HAL expects the same frame id for the offline
     * output that it requested in do_reprocess.
     */
    vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
    if (fe_cfg->offline_pass == OFFLINE_SECOND_PASS) {
        stream_info = &vfe_dev->axi_data.stream_info[
            HANDLE_TO_IDX(fe_cfg->output_stream_id)];
            pr_err("%s: Couldn't find streamid 0x%X\n", __func__,
                fe_cfg->output_stream_id);
        vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
        msm_isp_reset_framedrop(vfe_dev, stream_info);

        rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
            VFE_PING_FLAG, fe_cfg->output_buf_idx);
            pr_err("%s: Fetch engine config failed\n", __func__);
        for (i = 0; i < stream_info->num_planes; i++) {
            vfe_dev->hw_info->vfe_ops.axi_ops.
                enable_wm(vfe_dev->vfe_base, stream_info->wm[i],
            wm_reload_mask |= (1 << stream_info->wm[i]);
        vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
        vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
            vfe_dev->vfe_base, wm_reload_mask);
    return vfe_dev->hw_info->vfe_ops.core_ops.
        start_fetch_eng_multi_pass(vfe_dev, arg);
void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
    struct msm_vfe_fetch_engine_info *fetch_engine_info)
    struct msm_isp_event_data fe_rd_done_event;

    memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data));
    fe_rd_done_event.frame_id =
        vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
    fe_rd_done_event.u.fetch_done.session_id =
        fetch_engine_info->session_id;
    fe_rd_done_event.u.fetch_done.stream_id = fetch_engine_info->stream_id;
    fe_rd_done_event.u.fetch_done.handle = fetch_engine_info->bufq_handle;
    fe_rd_done_event.u.fetch_done.buf_idx = fetch_engine_info->buf_idx;
    fe_rd_done_event.u.fetch_done.fd = fetch_engine_info->fd;
    fe_rd_done_event.u.fetch_done.offline_mode =
        fetch_engine_info->offline_mode;

    ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
        __func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
    fetch_engine_info->is_busy = 0;
    msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
    struct msm_vfe_input_cfg *input_cfg)
    struct msm_vfe_pix_cfg *pix_cfg = NULL;

    pr_debug("%s: entry\n", __func__);

    if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
        pr_err("%s: pixel path is active\n", __func__);
    pix_cfg = &input_cfg->d.pix_cfg;
    vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
        input_cfg->input_pix_clk;
    vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
        input_cfg->d.pix_cfg.input_mux;
    vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
        input_cfg->d.pix_cfg.input_format;
    vfe_dev->axi_data.src_info[VFE_PIX_0].sof_counter_step = 1;
    /*
     * Copy pixel_clock back into input_pix_clk so that user space can use
     * the rounded clock rate.
     */
    input_cfg->input_pix_clk =
        vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock;
    ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
        input_cfg->d.pix_cfg.input_mux, CAMIF,
        input_cfg->d.pix_cfg.input_format);

    if (input_cfg->d.pix_cfg.input_mux == CAMIF ||
        input_cfg->d.pix_cfg.input_mux == TESTGEN) {
        vfe_dev->axi_data.src_info[VFE_PIX_0].width =
            input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
        if (input_cfg->d.pix_cfg.camif_cfg.subsample_cfg.
            sof_counter_step > 0) {
            vfe_dev->axi_data.src_info[VFE_PIX_0].
                sof_counter_step = input_cfg->d.pix_cfg.
                camif_cfg.subsample_cfg.sof_counter_step;
    } else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
        vfe_dev->axi_data.src_info[VFE_PIX_0].width =
            input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
    vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
        vfe_dev, &input_cfg->d.pix_cfg);
    vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_PIX_0);

    pr_debug("%s: exit\n", __func__);
static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
    struct msm_vfe_input_cfg *input_cfg)
    if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
        pr_err("%s: RAW%d path is active\n", __func__,
            input_cfg->input_src - VFE_RAW_0);
    vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
        input_cfg->input_pix_clk;
    vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
        vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
static int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
    struct msm_vfe_input_cfg *input_cfg = arg;
    long pixel_clock = 0;

    switch (input_cfg->input_src) {
        rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
        rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
        pr_err("%s: Invalid input source\n", __func__);
    pixel_clock = input_cfg->input_pix_clk;
    rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
        pr_err("%s: clock set rate failed\n", __func__);
static int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg)
    struct msm_vfe_camif_cfg *camif_cfg = arg;
    struct msm_vfe_input_cfg input_cfg;
    long pixel_clock = 0;

    pr_debug("%s: entry\n", __func__);

    memset(&input_cfg, 0, sizeof(input_cfg));
    input_cfg.input_src = VFE_PIX_0;
    input_cfg.input_pix_clk = 320000000;
    input_cfg.d.pix_cfg.camif_cfg = *camif_cfg;

    /* populate values from operation cfg */
    input_cfg.d.pix_cfg.input_mux =
        vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux;
    input_cfg.d.pix_cfg.camif_cfg.camif_input =
        vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input;
    rc = msm_isp_cfg_pix(vfe_dev, &input_cfg);

    pixel_clock = input_cfg.input_pix_clk;
    rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
        pr_err("%s: clock set rate failed\n", __func__);

    pr_debug("%s: exit\n", __func__);
static int msm_isp_operation_cfg(struct vfe_device *vfe_dev, void *arg)
    struct msm_vfe_operation_cfg *op_cfg = arg;

    pr_debug("%s: entry\n", __func__);

    vfe_dev->hvx_cmd = op_cfg->hvx_cmd;
    vfe_dev->is_split = 0; /* default to false */

    /* yuv_cosited currently not used */
    /* pixel input select not used */
    vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
    vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_pattern =
        op_cfg->pixel_pattern;
    vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input = op_cfg->camif_input;

    pr_debug("%s: exit\n", __func__);
static int msm_isp_set_dual_HW_master_slave_mode(
    struct vfe_device *vfe_dev, void *arg)
    /*
     * This method assumes that no two processes access it simultaneously;
     * currently this is guaranteed by the mutex lock in the ioctl path.
     * If that changes, this needs to be revisited.
     */
    struct msm_isp_set_dual_hw_ms_cmd *dual_hw_ms_cmd = NULL;
    struct msm_vfe_src_info *src_info = NULL;

    if (!vfe_dev || !arg) {
        pr_err("%s: Error! Invalid input vfe_dev %pK arg %pK\n",
            __func__, vfe_dev, arg);
    dual_hw_ms_cmd = (struct msm_isp_set_dual_hw_ms_cmd *)arg;
    vfe_dev->common_data->ms_resource.dual_hw_type = DUAL_HW_MASTER_SLAVE;
    vfe_dev->vfe_ub_policy = MSM_WM_UB_EQUAL_SLICING;
    if (dual_hw_ms_cmd->primary_intf < VFE_SRC_MAX) {
        ISP_DBG("%s: vfe %d primary_intf %d\n", __func__,
            vfe_dev->pdev->id, dual_hw_ms_cmd->primary_intf);
        src_info = &vfe_dev->axi_data.
            src_info[dual_hw_ms_cmd->primary_intf];
        src_info->dual_hw_ms_info.dual_hw_ms_type =
            dual_hw_ms_cmd->dual_hw_ms_type;
    /* No lock needed here since the ioctl lock keeps the two sessions from racing */
    if (src_info != NULL &&
        dual_hw_ms_cmd->dual_hw_ms_type == MS_TYPE_MASTER) {
        src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
        ISP_DBG("%s: vfe %d Master\n", __func__, vfe_dev->pdev->id);

        src_info->dual_hw_ms_info.sof_info =
            &vfe_dev->common_data->ms_resource.master_sof_info;
        vfe_dev->common_data->ms_resource.sof_delta_threshold =
            dual_hw_ms_cmd->sof_delta_threshold;
    } else if (src_info != NULL) {
            &vfe_dev->common_data->common_dev_data_lock,
        src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
        ISP_DBG("%s: vfe %d Slave\n", __func__, vfe_dev->pdev->id);

        for (j = 0; j < MS_NUM_SLAVE_MAX; j++) {
            if (vfe_dev->common_data->ms_resource.
                reserved_slave_mask & (1 << j))
            vfe_dev->common_data->ms_resource.reserved_slave_mask |=
            vfe_dev->common_data->ms_resource.num_slave++;
            src_info->dual_hw_ms_info.sof_info =
                &vfe_dev->common_data->ms_resource.
            src_info->dual_hw_ms_info.slave_id = j;
            ISP_DBG("%s: Slave id %d\n", __func__, j);
        spin_unlock_irqrestore(
            &vfe_dev->common_data->common_dev_data_lock,
        if (j == MS_NUM_SLAVE_MAX) {
            pr_err("%s: Error! Cannot find free aux resource\n",
    ISP_DBG("%s: vfe %d num_src %d\n", __func__, vfe_dev->pdev->id,
        dual_hw_ms_cmd->num_src);
    if (dual_hw_ms_cmd->num_src > VFE_SRC_MAX) {
        pr_err("%s: Error! Invalid num_src %d\n", __func__,
            dual_hw_ms_cmd->num_src);
    /*
     * This loop marks the non-primary interfaces as Master/Slave so that
     * their frame ids stay in sync; their timestamps are not saved, so no
     * sof_info resource is allocated for them.
     */
    for (i = 0; i < dual_hw_ms_cmd->num_src; i++) {
        if (dual_hw_ms_cmd->input_src[i] >= VFE_SRC_MAX) {
            pr_err("%s: Error! Invalid SRC param %d\n", __func__,
                dual_hw_ms_cmd->input_src[i]);
        ISP_DBG("%s: vfe %d src %d type %d\n", __func__,
            vfe_dev->pdev->id, dual_hw_ms_cmd->input_src[i],
            dual_hw_ms_cmd->dual_hw_ms_type);
        src_info = &vfe_dev->axi_data.
            src_info[dual_hw_ms_cmd->input_src[i]];
        src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
        src_info->dual_hw_ms_info.dual_hw_ms_type =
            dual_hw_ms_cmd->dual_hw_ms_type;
static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
    struct msm_vfe_cfg_cmd_list *proc_cmd =
        (struct msm_vfe_cfg_cmd_list *)arg;
    struct msm_vfe_cfg_cmd_list cmd, cmd_next;

    if (!vfe_dev || !arg) {
        pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
    rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
        pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
    if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
        pr_err("%s:%d failed: next size %u != expected %zu\n",
            __func__, __LINE__, cmd.next_size,
            sizeof(struct msm_vfe_cfg_cmd_list));
    if (++count >= MAX_ISP_REG_LIST) {
        pr_err("%s:%d Error exceeding the max register count:%u\n",
            __func__, __LINE__, count);
    if (copy_from_user(&cmd_next, (void __user *)cmd.next,
        sizeof(struct msm_vfe_cfg_cmd_list))) {
    rc = msm_isp_proc_cmd(vfe_dev, &cmd_next.cfg_cmd);
        pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
struct msm_vfe_cfg_cmd2_32 {
    compat_caddr_t cfg_data;
    compat_caddr_t cfg_cmd;

struct msm_vfe_cfg_cmd_list_32 {
    struct msm_vfe_cfg_cmd2_32 cfg_cmd;

#define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
    _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
#define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
    _IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
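/*
 * Compat layer: 32-bit user space passes pointers as compat_caddr_t, so the
 * _32 variants mirror the native structs with 32-bit pointer fields that
 * are widened via compat_ptr() below before the common handlers run.
 */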
static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
    struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
    proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
    proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
    proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
    proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
    struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
        (struct msm_vfe_cfg_cmd_list_32 *)arg;
    struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
    struct msm_vfe_cfg_cmd2 current_cmd;

    if (!vfe_dev || !arg) {
        pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
    msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
    rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
        pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
    while (compat_ptr(cmd.next) != NULL) {
        if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
            pr_err("%s:%d failed: next size %u != expected %zu\n",
                __func__, __LINE__, cmd.next_size,
                sizeof(struct msm_vfe_cfg_cmd_list_32));
        if (++count >= MAX_ISP_REG_LIST) {
            pr_err("%s:%d Error exceeding the max register count:%u\n",
                __func__, __LINE__, count);
        if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
            sizeof(struct msm_vfe_cfg_cmd_list_32))) {
        msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
        rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
            pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
    if (is_compat_task())
        return msm_isp_proc_cmd_list_compat(vfe_dev, arg);
    return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
#else /* CONFIG_COMPAT */
static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
    return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
#endif /* CONFIG_COMPAT */
static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
    unsigned int cmd, void *arg)
    struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);

    if (!vfe_dev || !vfe_dev->vfe_base) {
        pr_err("%s:%d failed: invalid params %pK\n",
            __func__, __LINE__, vfe_dev);
        pr_err("%s:%d failed %pK\n", __func__,
            __LINE__, vfe_dev->vfe_base);
    /*
     * Use the real-time mutex for hard real-time ioctls such as buffer
     * operations and register updates. Use the core mutex for other ioctls
     * that can take longer to complete, such as starting/stopping ISP
     * streams, which block until the hardware starts/stops streaming.
     */
871 ISP_DBG("%s: cmd: %d\n", __func__, _IOC_TYPE(cmd));
873 case VIDIOC_MSM_VFE_REG_CFG: {
874 mutex_lock(&vfe_dev->realtime_mutex);
875 rc = msm_isp_proc_cmd(vfe_dev, arg);
876 mutex_unlock(&vfe_dev->realtime_mutex);
879 case VIDIOC_MSM_VFE_REG_LIST_CFG: {
880 mutex_lock(&vfe_dev->realtime_mutex);
881 rc = msm_isp_proc_cmd_list(vfe_dev, arg);
882 mutex_unlock(&vfe_dev->realtime_mutex);
885 case VIDIOC_MSM_ISP_REQUEST_BUFQ:
887 case VIDIOC_MSM_ISP_ENQUEUE_BUF:
889 case VIDIOC_MSM_ISP_DEQUEUE_BUF:
891 case VIDIOC_MSM_ISP_UNMAP_BUF: {
892 mutex_lock(&vfe_dev->buf_mgr->lock);
893 rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
894 mutex_unlock(&vfe_dev->buf_mgr->lock);
897 case VIDIOC_MSM_ISP_RELEASE_BUFQ: {
898 if (vfe_dev->buf_mgr == NULL) {
899 pr_err("%s: buf mgr NULL! rc = -1\n", __func__);
903 mutex_lock(&vfe_dev->buf_mgr->lock);
904 rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
905 mutex_unlock(&vfe_dev->buf_mgr->lock);
908 case VIDIOC_MSM_ISP_REQUEST_STREAM:
909 mutex_lock(&vfe_dev->core_mutex);
910 rc = msm_isp_request_axi_stream(vfe_dev, arg);
911 mutex_unlock(&vfe_dev->core_mutex);
913 case VIDIOC_MSM_ISP_RELEASE_STREAM:
914 mutex_lock(&vfe_dev->core_mutex);
915 rc = msm_isp_release_axi_stream(vfe_dev, arg);
916 mutex_unlock(&vfe_dev->core_mutex);
918 case VIDIOC_MSM_ISP_CFG_STREAM:
919 mutex_lock(&vfe_dev->core_mutex);
920 rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
921 mutex_unlock(&vfe_dev->core_mutex);
923 case VIDIOC_MSM_ISP_CFG_HW_STATE:
924 mutex_lock(&vfe_dev->core_mutex);
925 rc = msm_isp_update_stream_bandwidth(vfe_dev,
926 *(enum msm_vfe_hw_state *)arg);
927 mutex_unlock(&vfe_dev->core_mutex);
929 case VIDIOC_MSM_ISP_AXI_HALT:
930 mutex_lock(&vfe_dev->core_mutex);
931 rc = msm_isp_axi_halt(vfe_dev, arg);
932 mutex_unlock(&vfe_dev->core_mutex);
934 case VIDIOC_MSM_ISP_AXI_RESET:
935 mutex_lock(&vfe_dev->core_mutex);
936 if (atomic_read(&vfe_dev->error_info.overflow_state)
938 rc = msm_isp_stats_reset(vfe_dev);
939 rc2 = msm_isp_axi_reset(vfe_dev, arg);
943 pr_err_ratelimited("%s: no HW reset, halt enforced.\n",
946 mutex_unlock(&vfe_dev->core_mutex);
948 case VIDIOC_MSM_ISP_AXI_RESTART:
949 mutex_lock(&vfe_dev->core_mutex);
950 if (atomic_read(&vfe_dev->error_info.overflow_state)
952 rc = msm_isp_stats_restart(vfe_dev);
953 rc2 = msm_isp_axi_restart(vfe_dev, arg);
957 pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
960 mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_INPUT_CFG:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_cfg_input(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_AHB_CLK_CFG:
        mutex_lock(&vfe_dev->core_mutex);
        if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
            rc = vfe_dev->hw_info->vfe_ops.core_ops.
                ahb_clk_cfg(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_set_dual_HW_master_slave_mode(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_FETCH_ENG_START:
    case VIDIOC_MSM_ISP_MAP_BUF_START_FE:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_start_fetch_engine(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START:
    case VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_start_fetch_engine_multi_pass(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
        enum msm_vfe_input_src frame_src =
            *((enum msm_vfe_input_src *)arg);
        vfe_dev->hw_info->vfe_ops.core_ops.
            reg_update(vfe_dev, frame_src);
    case VIDIOC_MSM_ISP_SET_SRC_STATE:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_set_src_state(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_request_stats_stream(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_release_stats_stream(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_update_stats_stream(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_UPDATE_STREAM:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_update_axi_stream(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_SMMU_ATTACH:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_OPERATION_CFG:
        mutex_lock(&vfe_dev->core_mutex);
        msm_isp_operation_cfg(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_AXI_OUTPUT_CFG:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_axi_output_cfg(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case VIDIOC_MSM_ISP_CAMIF_CFG:
        mutex_lock(&vfe_dev->core_mutex);
        rc = msm_isp_camif_cfg(vfe_dev, arg);
        mutex_unlock(&vfe_dev->core_mutex);
    case MSM_SD_NOTIFY_FREEZE:
        vfe_dev->isp_sof_debug = 0;
        vfe_dev->isp_raw0_debug = 0;
        vfe_dev->isp_raw1_debug = 0;
        vfe_dev->isp_raw2_debug = 0;
    case MSM_SD_UNNOTIFY_FREEZE:
    case MSM_SD_SHUTDOWN:
        while (vfe_dev->vfe_open_cnt != 0)
            msm_isp_close_node(sd, NULL);
        pr_err_ratelimited("%s: Invalid ISP command %d\n", __func__,
#ifdef CONFIG_COMPAT
static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
    unsigned int cmd, void *arg)
    struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);

    if (!vfe_dev || !vfe_dev->vfe_base) {
        pr_err("%s:%d failed: invalid params %pK\n",
            __func__, __LINE__, vfe_dev);
        pr_err("%s:%d failed %pK\n", __func__,
            __LINE__, vfe_dev->vfe_base);
    case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
        struct msm_vfe_cfg_cmd2 proc_cmd;

        mutex_lock(&vfe_dev->realtime_mutex);
        msm_isp_compat_to_proc_cmd(&proc_cmd,
            (struct msm_vfe_cfg_cmd2_32 *) arg);
        rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
        mutex_unlock(&vfe_dev->realtime_mutex);
    case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
        mutex_lock(&vfe_dev->realtime_mutex);
        rc = msm_isp_proc_cmd_list(vfe_dev, arg);
        mutex_unlock(&vfe_dev->realtime_mutex);
        return msm_isp_ioctl_unlocked(sd, cmd, arg);

long msm_isp_ioctl(struct v4l2_subdev *sd,
    unsigned int cmd, void *arg)
    return msm_isp_ioctl_compat(sd, cmd, arg);
#else /* CONFIG_COMPAT */
long msm_isp_ioctl(struct v4l2_subdev *sd,
    unsigned int cmd, void *arg)
    return msm_isp_ioctl_unlocked(sd, cmd, arg);
#endif /* CONFIG_COMPAT */
static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
    struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
    uint32_t *cfg_data, uint32_t cmd_len)
    if (!vfe_dev || !reg_cfg_cmd) {
        pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
            __LINE__, vfe_dev, reg_cfg_cmd);
    if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
        (!cfg_data || !cmd_len)) {
        pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
            __func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
    /* Validate input parameters */
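    /*
     * Note the overflow-guard idiom used throughout this switch: an offset
     * is first checked against UINT_MAX - len so that the subsequent
     * offset + len bound check cannot wrap around 32 bits, and register
     * offsets must also be word aligned (offset & 0x3 == 0).
     */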
    switch (reg_cfg_cmd->cmd_type) {
    case VFE_WRITE_MB: {
        if ((reg_cfg_cmd->u.rw_info.reg_offset >
            (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
            ((reg_cfg_cmd->u.rw_info.reg_offset +
            reg_cfg_cmd->u.rw_info.len) >
            vfe_dev->vfe_base_size) ||
            (reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
            pr_err_ratelimited("%s:%d regoffset %d len %d res %d\n",
                reg_cfg_cmd->u.rw_info.reg_offset,
                reg_cfg_cmd->u.rw_info.len,
                (uint32_t)vfe_dev->vfe_base_size);
        if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
            (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
            ((reg_cfg_cmd->u.rw_info.cmd_data_offset +
            reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
            pr_err_ratelimited("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
                reg_cfg_cmd->u.rw_info.cmd_data_offset,
                reg_cfg_cmd->u.rw_info.len, cmd_len);
    case VFE_WRITE_DMI_16BIT:
    case VFE_WRITE_DMI_32BIT:
    case VFE_WRITE_DMI_64BIT:
    case VFE_READ_DMI_16BIT:
    case VFE_READ_DMI_32BIT:
    case VFE_READ_DMI_64BIT: {
        if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
            reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
            if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
                reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
                (reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
                reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
                (sizeof(uint32_t)))) {
                pr_err("%s:%d hi %d lo %d\n",
                    reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
                    reg_cfg_cmd->u.dmi_info.lo_tbl_offset);
            if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
                pr_err("%s:%d len %d\n",
                    reg_cfg_cmd->u.dmi_info.len);
                reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
                (reg_cfg_cmd->u.dmi_info.len -
                sizeof(uint32_t))) ||
                ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
                reg_cfg_cmd->u.dmi_info.len -
                sizeof(uint32_t)) > cmd_len)) {
                pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
                    reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
                    reg_cfg_cmd->u.dmi_info.len, cmd_len);
        if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
            (UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
            ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
            reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
            pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
                reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
                reg_cfg_cmd->u.dmi_info.len, cmd_len);
    switch (reg_cfg_cmd->cmd_type) {
        msm_camera_io_memcpy(vfe_dev->vfe_base +
            reg_cfg_cmd->u.rw_info.reg_offset,
            (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
            reg_cfg_cmd->u.rw_info.len);
    case VFE_WRITE_MB: {
        msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
            reg_cfg_cmd->u.rw_info.reg_offset,
            (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
            reg_cfg_cmd->u.rw_info.len);
    case VFE_CFG_MASK: {
        unsigned long flags;

        if ((UINT_MAX - sizeof(temp) <
            reg_cfg_cmd->u.mask_info.reg_offset) ||
            (vfe_dev->vfe_base_size <
            reg_cfg_cmd->u.mask_info.reg_offset +
            (reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
            pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
        grab_lock = vfe_dev->hw_info->vfe_ops.core_ops.
            is_module_cfg_lock_needed(reg_cfg_cmd->
            u.mask_info.reg_offset);
            spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
        temp = msm_camera_io_r(vfe_dev->vfe_base +
            reg_cfg_cmd->u.mask_info.reg_offset);
        temp &= ~reg_cfg_cmd->u.mask_info.mask;
        temp |= reg_cfg_cmd->u.mask_info.val;
        msm_camera_io_w(temp, vfe_dev->vfe_base +
            reg_cfg_cmd->u.mask_info.reg_offset);
            spin_unlock_irqrestore(&vfe_dev->shared_data_lock,
    case VFE_WRITE_DMI_16BIT:
    case VFE_WRITE_DMI_32BIT:
    case VFE_WRITE_DMI_64BIT: {
        uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
        uint32_t hi_val, lo_val, lo_val1;

        if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
            hi_tbl_ptr = cfg_data +
                reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
        lo_tbl_ptr = cfg_data +
            reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
        if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
            reg_cfg_cmd->u.dmi_info.len =
                reg_cfg_cmd->u.dmi_info.len / 2;
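        /*
         * A 64-bit DMI entry is carried as two interleaved 32-bit table
         * words (the hi word immediately after its lo word), which is why
         * len is halved above and hi_tbl_ptr advances by two entries per
         * iteration below.
         */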
        for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
            lo_val = *lo_tbl_ptr++;
            if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
                lo_val1 = lo_val & 0x0000FFFF;
                lo_val = (lo_val & 0xFFFF0000)>>16;
                msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
                    vfe_dev->hw_info->dmi_reg_offset + 0x4);
            } else if (reg_cfg_cmd->cmd_type ==
                VFE_WRITE_DMI_64BIT) {
                hi_val = *hi_tbl_ptr;
                hi_tbl_ptr = hi_tbl_ptr + 2;
                msm_camera_io_w(hi_val, vfe_dev->vfe_base +
                    vfe_dev->hw_info->dmi_reg_offset);
            msm_camera_io_w(lo_val, vfe_dev->vfe_base +
                vfe_dev->hw_info->dmi_reg_offset + 0x4);
    case VFE_READ_DMI_16BIT:
    case VFE_READ_DMI_32BIT:
    case VFE_READ_DMI_64BIT: {
        uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
        uint32_t hi_val, lo_val, lo_val1;

        if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
            hi_tbl_ptr = cfg_data +
                reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
        lo_tbl_ptr = cfg_data +
            reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;

        if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
            reg_cfg_cmd->u.dmi_info.len =
                reg_cfg_cmd->u.dmi_info.len / 2;

        for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
            lo_val = msm_camera_io_r(vfe_dev->vfe_base +
                vfe_dev->hw_info->dmi_reg_offset + 0x4);
            if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
                lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
                    vfe_dev->hw_info->dmi_reg_offset + 0x4);
                lo_val |= lo_val1 << 16;
            *lo_tbl_ptr++ = lo_val;
            if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
                hi_val = msm_camera_io_r(vfe_dev->vfe_base +
                    vfe_dev->hw_info->dmi_reg_offset);
                *hi_tbl_ptr = hi_val;
    case VFE_HW_UPDATE_LOCK: {
        uint32_t update_id =
            vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;

        if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
            || update_id == *cfg_data) {
            pr_err("%s hw update lock failed acq %d, cur id %u, last id %u\n",
                vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
    case VFE_HW_UPDATE_UNLOCK: {
        if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
            pr_err("hw update across frame boundary, begin id %u, end id %d\n",
                vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
        vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
            vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
        uint32_t *data_ptr = cfg_data +
            reg_cfg_cmd->u.rw_info.cmd_data_offset/4;

        for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
            if ((data_ptr < cfg_data) ||
                (UINT_MAX / sizeof(*data_ptr) <
                (data_ptr - cfg_data)) ||
                (sizeof(*data_ptr) * (data_ptr - cfg_data) >=
            *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
                reg_cfg_cmd->u.rw_info.reg_offset);
            reg_cfg_cmd->u.rw_info.reg_offset += 4;
    case GET_MAX_CLK_RATE: {
        if (cmd_len != sizeof(__u32)) {
            pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
                __func__, __LINE__, cmd_len,
        rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_max_clk_rate(
            pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
        *(__u32 *)cfg_data = (__u32)rate;
    case GET_CLK_RATES: {
        struct msm_isp_clk_rates rates;
        struct msm_isp_clk_rates *user_data =
            (struct msm_isp_clk_rates *)cfg_data;

        if (cmd_len != sizeof(struct msm_isp_clk_rates)) {
            pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
                __func__, __LINE__, cmd_len,
                sizeof(struct msm_isp_clk_rates));
        rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(
            pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
        user_data->svs_rate = rates.svs_rate;
        user_data->nominal_rate = rates.nominal_rate;
        user_data->high_rate = rates.high_rate;
        uint32_t *isp_id = NULL;

        if (cmd_len < sizeof(uint32_t)) {
            pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
                __func__, __LINE__, cmd_len,
        isp_id = (uint32_t *)cfg_data;
        *isp_id = vfe_dev->pdev->id;
    case SET_WM_UB_SIZE:
    case SET_UB_POLICY: {
        if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
            pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
                __func__, __LINE__, cmd_len,
                sizeof(vfe_dev->vfe_ub_policy));
        vfe_dev->vfe_ub_policy = *cfg_data;
int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
    struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
    struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
    uint32_t *cfg_data = NULL;

    if (!proc_cmd->num_cfg
        || proc_cmd->num_cfg > MAX_ISP_CMD_NUM) {
        pr_err("%s: num_cfg outside allowed range\n",
    reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
        proc_cmd->num_cfg, GFP_KERNEL);
        goto reg_cfg_failed;
    if (copy_from_user(reg_cfg_cmd,
        (void __user *)(proc_cmd->cfg_cmd),
        sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
        goto copy_cmd_failed;
    if (proc_cmd->cmd_len > 0) {
        if (proc_cmd->cmd_len > MAX_ISP_CMD_LEN) {
            pr_err("%s: cmd_len exceeds max allowed\n", __func__);
            goto cfg_data_failed;
        cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
            goto cfg_data_failed;
        if (copy_from_user(cfg_data,
            (void __user *)(proc_cmd->cfg_data),
            proc_cmd->cmd_len)) {
            goto copy_cmd_failed;
    for (i = 0; i < proc_cmd->num_cfg; i++)
        rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
            cfg_data, proc_cmd->cmd_len);
    if (copy_to_user(proc_cmd->cfg_data,
        cfg_data, proc_cmd->cmd_len)) {
        goto copy_cmd_failed;
int msm_isp_send_event(struct vfe_device *vfe_dev,
    uint32_t event_type,
    struct msm_isp_event_data *event_data)
    struct v4l2_event isp_event;

    memset(&isp_event, 0, sizeof(struct v4l2_event));
    isp_event.type = event_type;
    memcpy(&isp_event.u.data[0], event_data,
        sizeof(struct msm_isp_event_data));
    v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
#define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
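/*
 * CAL_WORD(width, M, N) computes ceil(width * M / N): the number of output
 * words per line when N pixels occupy M words. For example, the 10-bit
 * packed Bayer formats below use CAL_WORD(ppl, 5, 32), i.e. 32 pixels per
 * 5 words (320 bits, 64-bit words), so a 4000-pixel line needs
 * CAL_WORD(4000, 5, 32) = 625 words.
 */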
int msm_isp_cal_word_per_line(uint32_t output_format,
    uint32_t pixel_per_line)
    switch (output_format) {
    case V4L2_PIX_FMT_SBGGR8:
    case V4L2_PIX_FMT_SGBRG8:
    case V4L2_PIX_FMT_SGRBG8:
    case V4L2_PIX_FMT_SRGGB8:
    case V4L2_PIX_FMT_QBGGR8:
    case V4L2_PIX_FMT_QGBRG8:
    case V4L2_PIX_FMT_QGRBG8:
    case V4L2_PIX_FMT_QRGGB8:
    case V4L2_PIX_FMT_JPEG:
    case V4L2_PIX_FMT_META:
        val = CAL_WORD(pixel_per_line, 1, 8);
    case V4L2_PIX_FMT_SBGGR10:
    case V4L2_PIX_FMT_SGBRG10:
    case V4L2_PIX_FMT_SGRBG10:
    case V4L2_PIX_FMT_SRGGB10:
    case V4L2_PIX_FMT_Y10:
    case V4L2_PIX_FMT_SBGGR10DPCM6:
    case V4L2_PIX_FMT_SGBRG10DPCM6:
    case V4L2_PIX_FMT_SGRBG10DPCM6:
    case V4L2_PIX_FMT_SRGGB10DPCM6:
    case V4L2_PIX_FMT_SBGGR10DPCM8:
    case V4L2_PIX_FMT_SGBRG10DPCM8:
    case V4L2_PIX_FMT_SGRBG10DPCM8:
    case V4L2_PIX_FMT_SRGGB10DPCM8:
    case V4L2_PIX_FMT_META10:
        val = CAL_WORD(pixel_per_line, 5, 32);
    case V4L2_PIX_FMT_SBGGR12:
    case V4L2_PIX_FMT_SGBRG12:
    case V4L2_PIX_FMT_SGRBG12:
    case V4L2_PIX_FMT_SRGGB12:
    case V4L2_PIX_FMT_Y12:
        val = CAL_WORD(pixel_per_line, 3, 16);
    case V4L2_PIX_FMT_SBGGR14:
    case V4L2_PIX_FMT_SGBRG14:
    case V4L2_PIX_FMT_SGRBG14:
    case V4L2_PIX_FMT_SRGGB14:
        val = CAL_WORD(pixel_per_line, 7, 32);
    case V4L2_PIX_FMT_QBGGR10:
    case V4L2_PIX_FMT_QGBRG10:
    case V4L2_PIX_FMT_QGRBG10:
    case V4L2_PIX_FMT_QRGGB10:
        val = CAL_WORD(pixel_per_line, 1, 6);
    case V4L2_PIX_FMT_QBGGR12:
    case V4L2_PIX_FMT_QGBRG12:
    case V4L2_PIX_FMT_QGRBG12:
    case V4L2_PIX_FMT_QRGGB12:
        val = CAL_WORD(pixel_per_line, 1, 5);
    case V4L2_PIX_FMT_QBGGR14:
    case V4L2_PIX_FMT_QGBRG14:
    case V4L2_PIX_FMT_QGRBG14:
    case V4L2_PIX_FMT_QRGGB14:
        val = CAL_WORD(pixel_per_line, 1, 4);
    case V4L2_PIX_FMT_NV12:
    case V4L2_PIX_FMT_NV21:
    case V4L2_PIX_FMT_NV14:
    case V4L2_PIX_FMT_NV41:
    case V4L2_PIX_FMT_NV16:
    case V4L2_PIX_FMT_NV61:
    case V4L2_PIX_FMT_GREY:
        val = CAL_WORD(pixel_per_line, 1, 8);
    case V4L2_PIX_FMT_YUYV:
    case V4L2_PIX_FMT_YVYU:
    case V4L2_PIX_FMT_UYVY:
    case V4L2_PIX_FMT_VYUY:
        val = CAL_WORD(pixel_per_line, 2, 8);
    case V4L2_PIX_FMT_P16BGGR10:
    case V4L2_PIX_FMT_P16GBRG10:
    case V4L2_PIX_FMT_P16GRBG10:
    case V4L2_PIX_FMT_P16RGGB10:
        val = CAL_WORD(pixel_per_line, 1, 4);
    case V4L2_PIX_FMT_NV24:
    case V4L2_PIX_FMT_NV42:
        val = CAL_WORD(pixel_per_line, 1, 8);
    /* TODO: Add more image formats */
        msm_isp_print_fourcc_error(__func__, output_format);
enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
    switch (output_format) {
    case V4L2_PIX_FMT_SBGGR8:
    case V4L2_PIX_FMT_SGBRG8:
    case V4L2_PIX_FMT_SGRBG8:
    case V4L2_PIX_FMT_SRGGB8:
    case V4L2_PIX_FMT_SBGGR10:
    case V4L2_PIX_FMT_SGBRG10:
    case V4L2_PIX_FMT_SGRBG10:
    case V4L2_PIX_FMT_SRGGB10:
    case V4L2_PIX_FMT_SBGGR10DPCM6:
    case V4L2_PIX_FMT_SGBRG10DPCM6:
    case V4L2_PIX_FMT_SGRBG10DPCM6:
    case V4L2_PIX_FMT_SRGGB10DPCM6:
    case V4L2_PIX_FMT_SBGGR10DPCM8:
    case V4L2_PIX_FMT_SGBRG10DPCM8:
    case V4L2_PIX_FMT_SGRBG10DPCM8:
    case V4L2_PIX_FMT_SRGGB10DPCM8:
    case V4L2_PIX_FMT_SBGGR12:
    case V4L2_PIX_FMT_SGBRG12:
    case V4L2_PIX_FMT_SGRBG12:
    case V4L2_PIX_FMT_SRGGB12:
    case V4L2_PIX_FMT_SBGGR14:
    case V4L2_PIX_FMT_SGBRG14:
    case V4L2_PIX_FMT_SGRBG14:
    case V4L2_PIX_FMT_SRGGB14:
    case V4L2_PIX_FMT_GREY:
    case V4L2_PIX_FMT_Y10:
    case V4L2_PIX_FMT_Y12:
    case V4L2_PIX_FMT_QBGGR8:
    case V4L2_PIX_FMT_QGBRG8:
    case V4L2_PIX_FMT_QGRBG8:
    case V4L2_PIX_FMT_QRGGB8:
    case V4L2_PIX_FMT_QBGGR10:
    case V4L2_PIX_FMT_QGBRG10:
    case V4L2_PIX_FMT_QGRBG10:
    case V4L2_PIX_FMT_QRGGB10:
    case V4L2_PIX_FMT_QBGGR12:
    case V4L2_PIX_FMT_QGBRG12:
    case V4L2_PIX_FMT_QGRBG12:
    case V4L2_PIX_FMT_QRGGB12:
    case V4L2_PIX_FMT_QBGGR14:
    case V4L2_PIX_FMT_QGBRG14:
    case V4L2_PIX_FMT_QGRBG14:
    case V4L2_PIX_FMT_QRGGB14:
    case V4L2_PIX_FMT_P16BGGR10:
    case V4L2_PIX_FMT_P16GBRG10:
    case V4L2_PIX_FMT_P16GRBG10:
    case V4L2_PIX_FMT_P16RGGB10:
        msm_isp_print_fourcc_error(__func__, output_format);
int msm_isp_get_bit_per_pixel(uint32_t output_format)
    switch (output_format) {
    case V4L2_PIX_FMT_Y4:
    case V4L2_PIX_FMT_Y6:
    case V4L2_PIX_FMT_SBGGR8:
    case V4L2_PIX_FMT_SGBRG8:
    case V4L2_PIX_FMT_SGRBG8:
    case V4L2_PIX_FMT_SRGGB8:
    case V4L2_PIX_FMT_QBGGR8:
    case V4L2_PIX_FMT_QGBRG8:
    case V4L2_PIX_FMT_QGRBG8:
    case V4L2_PIX_FMT_QRGGB8:
    case V4L2_PIX_FMT_JPEG:
    case V4L2_PIX_FMT_META:
    case V4L2_PIX_FMT_NV12:
    case V4L2_PIX_FMT_NV21:
    case V4L2_PIX_FMT_NV14:
    case V4L2_PIX_FMT_NV41:
    case V4L2_PIX_FMT_YVU410:
    case V4L2_PIX_FMT_YVU420:
    case V4L2_PIX_FMT_YUYV:
    case V4L2_PIX_FMT_YYUV:
    case V4L2_PIX_FMT_YVYU:
    case V4L2_PIX_FMT_UYVY:
    case V4L2_PIX_FMT_VYUY:
    case V4L2_PIX_FMT_YUV422P:
    case V4L2_PIX_FMT_YUV411P:
    case V4L2_PIX_FMT_Y41P:
    case V4L2_PIX_FMT_YUV444:
    case V4L2_PIX_FMT_YUV555:
    case V4L2_PIX_FMT_YUV565:
    case V4L2_PIX_FMT_YUV32:
    case V4L2_PIX_FMT_YUV410:
    case V4L2_PIX_FMT_YUV420:
    case V4L2_PIX_FMT_GREY:
    case V4L2_PIX_FMT_PAL8:
    case V4L2_PIX_FMT_UV8:
    case MSM_V4L2_PIX_FMT_META:
    case V4L2_PIX_FMT_SBGGR10:
    case V4L2_PIX_FMT_SGBRG10:
    case V4L2_PIX_FMT_SGRBG10:
    case V4L2_PIX_FMT_SRGGB10:
    case V4L2_PIX_FMT_SBGGR10DPCM6:
    case V4L2_PIX_FMT_SGBRG10DPCM6:
    case V4L2_PIX_FMT_SGRBG10DPCM6:
    case V4L2_PIX_FMT_SRGGB10DPCM6:
    case V4L2_PIX_FMT_SBGGR10DPCM8:
    case V4L2_PIX_FMT_SGBRG10DPCM8:
    case V4L2_PIX_FMT_SGRBG10DPCM8:
    case V4L2_PIX_FMT_SRGGB10DPCM8:
    case V4L2_PIX_FMT_QBGGR10:
    case V4L2_PIX_FMT_QGBRG10:
    case V4L2_PIX_FMT_QGRBG10:
    case V4L2_PIX_FMT_QRGGB10:
    case V4L2_PIX_FMT_Y10:
    case V4L2_PIX_FMT_Y10BPACK:
    case V4L2_PIX_FMT_P16BGGR10:
    case V4L2_PIX_FMT_P16GBRG10:
    case V4L2_PIX_FMT_P16GRBG10:
    case V4L2_PIX_FMT_P16RGGB10:
    case V4L2_PIX_FMT_META10:
    case MSM_V4L2_PIX_FMT_META10:
    case V4L2_PIX_FMT_SBGGR12:
    case V4L2_PIX_FMT_SGBRG12:
    case V4L2_PIX_FMT_SGRBG12:
    case V4L2_PIX_FMT_SRGGB12:
    case V4L2_PIX_FMT_QBGGR12:
    case V4L2_PIX_FMT_QGBRG12:
    case V4L2_PIX_FMT_QGRBG12:
    case V4L2_PIX_FMT_QRGGB12:
    case V4L2_PIX_FMT_Y12:
    case V4L2_PIX_FMT_SBGGR14:
    case V4L2_PIX_FMT_SGBRG14:
    case V4L2_PIX_FMT_SGRBG14:
    case V4L2_PIX_FMT_SRGGB14:
    case V4L2_PIX_FMT_QBGGR14:
    case V4L2_PIX_FMT_QGBRG14:
    case V4L2_PIX_FMT_QGRBG14:
    case V4L2_PIX_FMT_QRGGB14:
    case V4L2_PIX_FMT_NV16:
    case V4L2_PIX_FMT_NV61:
    case V4L2_PIX_FMT_Y16:
    case V4L2_PIX_FMT_NV24:
    case V4L2_PIX_FMT_NV42:
    /* TODO: Add more image formats */
        msm_isp_print_fourcc_error(__func__, output_format);
        pr_err("%s: Invalid output format %x\n",
            __func__, output_format);
void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
    struct msm_vfe_error_info *error_info = &vfe_dev->error_info;

    error_info->info_dump_frame_count++;

static int msm_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
    int rc = vfe_dev->buf_mgr->pagefault_debug_disable;

    pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %pK\n", __func__,
        __LINE__, vfe_dev->pdev->id, vfe_dev);

    msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);

    if (vfe_dev->buf_mgr->pagefault_debug_disable == 0) {
        vfe_dev->buf_mgr->pagefault_debug_disable = 1;
        vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
            vfe_dev->page_fault_addr);
        msm_isp_print_ping_pong_address(vfe_dev,
            vfe_dev->page_fault_addr);
        vfe_dev->hw_info->vfe_ops.axi_ops.
            read_wm_ping_pong_addr(vfe_dev);
void msm_isp_process_error_info(struct vfe_device *vfe_dev)
    struct msm_vfe_error_info *error_info = &vfe_dev->error_info;

    if (error_info->error_count == 1 ||
        !(error_info->info_dump_frame_count % 100)) {
        vfe_dev->hw_info->vfe_ops.core_ops.
            process_error_status(vfe_dev);
        error_info->error_mask0 = 0;
        error_info->error_mask1 = 0;
        error_info->camif_status = 0;
        error_info->violation_status = 0;

static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
    uint32_t error_mask0, uint32_t error_mask1)
    vfe_dev->error_info.error_mask0 |= error_mask0;
    vfe_dev->error_info.error_mask1 |= error_mask1;
    vfe_dev->error_info.error_count++;
void msm_isp_process_overflow_irq(
    struct vfe_device *vfe_dev,
    uint32_t *irq_status0, uint32_t *irq_status1,
    uint32_t force_overflow)
    uint32_t overflow_mask;

    /* if there are no active streams - do not start recovery */
    if (!vfe_dev->axi_data.num_active_stream)

    /* Mask out all other irqs if recovery is started */
    if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
        uint32_t halt_restart_mask0, halt_restart_mask1;

        vfe_dev->hw_info->vfe_ops.core_ops.
            get_halt_restart_mask(&halt_restart_mask0,
                &halt_restart_mask1);
        *irq_status0 &= halt_restart_mask0;
        *irq_status1 &= halt_restart_mask1;

    /* Check if any overflow bit is set */
    vfe_dev->hw_info->vfe_ops.core_ops.
        get_overflow_mask(&overflow_mask);
    if (!force_overflow)
        overflow_mask &= *irq_status1;

    if (overflow_mask) {
        struct msm_isp_event_data error_event;

        if (vfe_dev->reset_pending == 1) {
            pr_err("%s:%d failed: overflow %x during reset\n",
                __func__, __LINE__, overflow_mask);
            /* Clear overflow bits since reset is pending */
            *irq_status1 &= ~overflow_mask;
        ISP_DBG("%s: VFE%d Bus overflow detected: start recovery!\n",
            __func__, vfe_dev->pdev->id);

        /* mask off IRQs for the current vfe */
        atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
            NO_OVERFLOW, OVERFLOW_DETECTED);
        vfe_dev->recovery_irq0_mask = vfe_dev->irq0_mask;
        vfe_dev->recovery_irq1_mask = vfe_dev->irq1_mask;
        vfe_dev->hw_info->vfe_ops.core_ops.
            set_halt_restart_mask(vfe_dev);

        /* mask off the other vfe if dual vfe is used */
        if (vfe_dev->is_split) {
            uint32_t other_vfe_id;
            struct vfe_device *other_vfe_dev;

            other_vfe_id = (vfe_dev->pdev->id == ISP_VFE0) ?
                ISP_VFE1 : ISP_VFE0;
            other_vfe_dev = vfe_dev->common_data->
                dual_vfe_res->vfe_dev[other_vfe_id];
            if (other_vfe_dev) {
                other_vfe_dev->recovery_irq0_mask =
                    other_vfe_dev->irq0_mask;
                other_vfe_dev->recovery_irq1_mask =
                    other_vfe_dev->irq1_mask;
            atomic_cmpxchg(&(vfe_dev->common_data->dual_vfe_res->
                vfe_dev[other_vfe_id]->
                error_info.overflow_state),
                NO_OVERFLOW, OVERFLOW_DETECTED);
            vfe_dev->hw_info->vfe_ops.core_ops.
                set_halt_restart_mask(vfe_dev->common_data->
                    dual_vfe_res->vfe_dev[other_vfe_id]);
        /* reset irq status to skip further processing */

        /* send overflow event as needed */
        if (atomic_read(&vfe_dev->error_info.overflow_state)
            memset(&error_event, 0, sizeof(error_event));
            error_event.frame_id =
                vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
            error_event.u.error_info.err_type =
                ISP_ERROR_BUS_OVERFLOW;
            msm_isp_send_event(vfe_dev,
                ISP_EVENT_ERROR, &error_event);
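/*
 * Recovery sequence in the overflow path above: flag OVERFLOW_DETECTED via
 * cmpxchg, save the current irq masks for later restoration, narrow the
 * hardware masks to halt/restart interrupts only (on both VFEs when the
 * pipeline is split), and notify user space with ISP_ERROR_BUS_OVERFLOW.
 */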
void msm_isp_reset_burst_count_and_frame_drop(
    struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
    if ((stream_info->state != ACTIVE) ||
        (stream_info->stream_type != BURST_STREAM)) {
    if (stream_info->num_burst_capture != 0)
        msm_isp_reset_framedrop(vfe_dev, stream_info);
static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
    uint32_t irq_status0, uint32_t irq_status1,
    uint32_t ping_pong_status)
    unsigned long flags;
    struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;

    spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
    queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
    if (queue_cmd->cmd_used) {
        ISP_DBG("%s: Tasklet queue overflow: %d\n",
            __func__, vfe_dev->pdev->id);
        list_del(&queue_cmd->list);
        atomic_add(1, &vfe_dev->irq_cnt);
    queue_cmd->vfeInterruptStatus0 = irq_status0;
    queue_cmd->vfeInterruptStatus1 = irq_status1;
    queue_cmd->vfePingPongStatus = ping_pong_status;
    msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
    queue_cmd->cmd_used = 1;
    vfe_dev->taskletq_idx = (vfe_dev->taskletq_idx + 1) %
        MSM_VFE_TASKLETQ_SIZE;
    list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
    spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
    tasklet_schedule(&vfe_dev->vfe_tasklet);
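/*
 * Top-half/bottom-half split: the hard IRQ path only snapshots the irq and
 * ping-pong status plus a timestamp into a fixed ring of queue commands
 * (recycling the oldest entry on overflow), and the scheduled tasklet
 * drains the list to do the actual per-interrupt processing in
 * msm_isp_do_tasklet() below.
 */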
irqreturn_t msm_isp_process_irq(int irq_num, void *data)
    struct vfe_device *vfe_dev = (struct vfe_device *) data;
    uint32_t irq_status0, irq_status1, ping_pong_status;
    uint32_t error_mask0, error_mask1;

    vfe_dev->hw_info->vfe_ops.irq_ops.
        read_irq_status_and_clear(vfe_dev, &irq_status0, &irq_status1);
    if ((irq_status0 == 0) && (irq_status1 == 0)) {
        pr_err("%s:VFE%d irq_status0 & 1 are both 0\n",
            __func__, vfe_dev->pdev->id);
    ping_pong_status = vfe_dev->hw_info->vfe_ops.axi_ops.
        get_pingpong_status(vfe_dev);
    if (vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq) {
        vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq(vfe_dev,
    msm_isp_process_overflow_irq(vfe_dev,
        &irq_status0, &irq_status1, 0);
    vfe_dev->hw_info->vfe_ops.core_ops.
        get_error_mask(&error_mask0, &error_mask1);
    error_mask0 &= irq_status0;
    error_mask1 &= irq_status1;
    irq_status0 &= ~error_mask0;
    irq_status1 &= ~error_mask1;
    if ((error_mask0 != 0) || (error_mask1 != 0))
        msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);

    if ((irq_status0 == 0) && (irq_status1 == 0) &&
        (!(((error_mask0 != 0) || (error_mask1 != 0)) &&
        vfe_dev->error_info.error_count == 1))) {
        ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
    dump_data.vfe_dev = (struct vfe_device *) data;
    if (vfe_dev->is_split &&
        (vfe_dev->common_data->dual_vfe_res->vfe_dev[
        !vfe_dev->pdev->id]) &&
        (vfe_dev->common_data->dual_vfe_res->vfe_dev[
        !vfe_dev->pdev->id]->vfe_open_cnt)) {
        spin_lock(&dump_irq_lock);
        dump_data.arr[dump_data.first].current_vfe_irq.
            vfe_id = vfe_dev->pdev->id;
        dump_data.arr[dump_data.first].current_vfe_irq.
            irq_status0 = irq_status0;
        dump_data.arr[dump_data.first].current_vfe_irq.
            irq_status1 = irq_status1;
        dump_data.arr[dump_data.first].current_vfe_irq.
            ping_pong_status = ping_pong_status;

        dump_data.arr[dump_data.first].other_vfe.
            vfe_id = (!vfe_dev->pdev->id);
        vfe_dev->hw_info->vfe_ops.irq_ops.
            vfe_dev->common_data->dual_vfe_res->vfe_dev[
            !vfe_dev->pdev->id],
            &dump_data.arr[dump_data.first].other_vfe.irq_status0,
            &dump_data.arr[dump_data.first].other_vfe.irq_status1);
        dump_data.arr[dump_data.first].other_vfe.
            vfe_dev->hw_info->vfe_ops.axi_ops.
            get_pingpong_status(
            vfe_dev->common_data->dual_vfe_res->vfe_dev[
            !vfe_dev->pdev->id]);
        msm_isp_get_timestamp(&dump_data.arr[dump_data.first].
            other_vfe.ts, vfe_dev);
            (dump_data.first + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
        dump_data.fill_count++;
        spin_unlock(&dump_irq_lock);
    msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1,
void msm_isp_do_tasklet(unsigned long data)
{
	unsigned long flags;
	struct vfe_device *vfe_dev = (struct vfe_device *) data;
	struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
	struct msm_vfe_tasklet_queue_cmd *queue_cmd;
	struct msm_isp_timestamp ts;
	uint32_t irq_status0, irq_status1, pingpong_status;

	if (vfe_dev->vfe_base == NULL || vfe_dev->vfe_open_cnt == 0) {
		ISP_DBG("%s: VFE%d open cnt = %d, device closed(base = %pK)\n",
			__func__, vfe_dev->pdev->id, vfe_dev->vfe_open_cnt,
			vfe_dev->vfe_base);
		return;
	}

	while (atomic_read(&vfe_dev->irq_cnt)) {
		spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
		queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
			struct msm_vfe_tasklet_queue_cmd, list);
		if (!queue_cmd) {
			atomic_set(&vfe_dev->irq_cnt, 0);
			spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
			return;
		}
		atomic_sub(1, &vfe_dev->irq_cnt);
		list_del(&queue_cmd->list);
		queue_cmd->cmd_used = 0;
		irq_status0 = queue_cmd->vfeInterruptStatus0;
		irq_status1 = queue_cmd->vfeInterruptStatus1;
		pingpong_status = queue_cmd->vfePingPongStatus;
		ts = queue_cmd->ts;
		spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
		ISP_DBG("%s: vfe_id %d status0: 0x%x status1: 0x%x\n",
			__func__, vfe_dev->pdev->id, irq_status0, irq_status1);
		if (vfe_dev->is_split) {
			spin_lock(&dump_tasklet_lock);
			tasklet_data.arr[tasklet_data.first].
				current_vfe_irq.vfe_id = vfe_dev->pdev->id;
			tasklet_data.arr[tasklet_data.first].
				current_vfe_irq.core = smp_processor_id();
			tasklet_data.arr[tasklet_data.first].
				current_vfe_irq.irq_status0 = irq_status0;
			tasklet_data.arr[tasklet_data.first].
				current_vfe_irq.irq_status1 = irq_status1;
			tasklet_data.arr[tasklet_data.first].
				current_vfe_irq.ping_pong_status =
					pingpong_status;
			tasklet_data.arr[tasklet_data.first].
				current_vfe_irq.ts = ts;
			tasklet_data.first =
				(tasklet_data.first + 1) %
					MAX_ISP_PING_PONG_DUMP_SIZE;
			spin_unlock(&dump_tasklet_lock);
		}
		irq_ops->process_reset_irq(vfe_dev,
			irq_status0, irq_status1);
		irq_ops->process_halt_irq(vfe_dev,
			irq_status0, irq_status1);
		if (atomic_read(&vfe_dev->error_info.overflow_state)
			!= NO_OVERFLOW) {
			ISP_DBG("%s: Recovery in processing, Ignore IRQs!!!\n",
				__func__);
			continue;
		}
		msm_isp_process_error_info(vfe_dev);
		irq_ops->process_stats_irq(vfe_dev,
			irq_status0, irq_status1,
			pingpong_status, &ts);
		irq_ops->process_axi_irq(vfe_dev,
			irq_status0, irq_status1,
			pingpong_status, &ts);
		irq_ops->process_camif_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_reg_update(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_sof_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_epoch_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
	}
}
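
/*
 * msm_isp_set_src_state - ioctl-path helper to mark an input source
 * active/inactive and seed its frame counter from userspace.
 */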
int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
{
	struct msm_vfe_axi_src_state *src_state = arg;

	if (src_state->input_src >= VFE_SRC_MAX)
		return -EINVAL;
	vfe_dev->axi_data.src_info[src_state->input_src].active =
		src_state->src_active;
	vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
		src_state->src_frame_id;
	return 0;
}
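
/*
 * msm_vfe_iommu_fault_handler - SMMU page-fault callback; token is the
 * vfe_device registered in msm_isp_open_node. Records the faulting IOVA
 * and, if the device is open with active streams, enforces a halt and
 * runs ISP page-fault handling under core_mutex.
 */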
static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long iova, int flags, void *token)
{
	struct vfe_device *vfe_dev = NULL;

	if (token) {
		vfe_dev = (struct vfe_device *)token;
		vfe_dev->page_fault_addr = iova;
		if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops ||
			!vfe_dev->axi_data.num_active_stream) {
			pr_err("%s:%d buf_mgr %pK active strms %d\n", __func__,
				__LINE__, vfe_dev->buf_mgr,
				vfe_dev->axi_data.num_active_stream);
			goto end;
		}

		mutex_lock(&vfe_dev->core_mutex);
		if (vfe_dev->vfe_open_cnt > 0) {
			atomic_set(&vfe_dev->error_info.overflow_state,
				HALT_ENFORCED);
			msm_isp_process_iommu_page_fault(vfe_dev);
		} else {
			pr_err("%s: no handling, vfe open cnt = %d\n",
				__func__, vfe_dev->vfe_open_cnt);
		}
		mutex_unlock(&vfe_dev->core_mutex);
	} else {
		ISP_DBG("%s:%d] no token received: %pK\n",
			__func__, __LINE__, token);
		goto end;
	}
end:
	return;
}
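
/*
 * msm_isp_open_node - v4l2 subdev open. The first opener powers the VFE,
 * resets it, initializes the buffer manager and per-session state, and
 * registers the SMMU fault handler; clocks, regulators and the AHB vote
 * are then dropped again so the idle device can sit in XO shutdown until
 * streaming actually starts. Later opens only bump vfe_open_cnt.
 */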
int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
	long rc = 0;
	enum cam_ahb_clk_client id;

	ISP_DBG("%s open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);

	if (vfe_dev->common_data == NULL ||
		vfe_dev->common_data->dual_vfe_res == NULL) {
		pr_err("%s: Error in probe. No common_data or dual vfe res\n",
			__func__);
		return -EINVAL;
	}

	if (vfe_dev->pdev->id == ISP_VFE0)
		vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;

	mutex_lock(&vfe_dev->realtime_mutex);
	mutex_lock(&vfe_dev->core_mutex);

	if (vfe_dev->vfe_open_cnt++) {
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return 0;
	}

	vfe_dev->reset_pending = 0;
	vfe_dev->isp_sof_debug = 0;
	vfe_dev->isp_raw0_debug = 0;
	vfe_dev->isp_raw1_debug = 0;
	vfe_dev->isp_raw2_debug = 0;

	if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
		pr_err("%s: init hardware failed\n", __func__);
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EBUSY;
	}

	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
	atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);

	vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);

	vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
	ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
	if (rc <= 0) {
		pr_err("%s: reset timeout\n", __func__);
		vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EINVAL;
	}

	vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);

	vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr, "msm_isp");

	memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
	memset(&vfe_dev->stats_data, 0,
		sizeof(struct msm_vfe_stats_shared_data));
	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
	memset(&vfe_dev->fetch_engine_info, 0,
		sizeof(vfe_dev->fetch_engine_info));
	vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
	vfe_dev->axi_data.enable_frameid_recovery = 0;
	vfe_dev->taskletq_idx = 0;
	vfe_dev->vt_enable = 0;
	vfe_dev->reg_update_requested = 0;
	/* Register page fault handler */
	vfe_dev->buf_mgr->pagefault_debug_disable = 0;
	cam_smmu_reg_client_page_fault_handler(
		vfe_dev->buf_mgr->iommu_hdl,
		msm_vfe_iommu_fault_handler, vfe_dev);

	/* Disable vfe clks and allow the device to go into XO shutdown mode */
	if (vfe_dev->pdev->id == 0)
		id = CAM_AHB_CLIENT_VFE0;
	else
		id = CAM_AHB_CLIENT_VFE1;
	if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
		pr_err("%s: failed to remove vote for AHB\n", __func__);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);

	mutex_unlock(&vfe_dev->core_mutex);
	mutex_unlock(&vfe_dev->realtime_mutex);
	return 0;
}
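
/*
 * msm_isp_end_avtimer - invoked on close when video timestamps
 * (vt_enable) were in use; re-allows AVTimer power collapse. The
 * fallback variant only logs, as AVTimer support is compiled out.
 */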
#ifdef CONFIG_MSM_AVTIMER
static void msm_isp_end_avtimer(void)
{
	avcs_core_disable_power_collapse(0);
}
#else
static void msm_isp_end_avtimer(void)
{
	pr_err("AV Timer is not supported\n");
}
#endif
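
/*
 * msm_isp_close_node - v4l2 subdev close; mirrors msm_isp_open_node.
 * Wakes the hardware out of XO shutdown first, and on the last close
 * unregisters the fault handler, halts AXI, disables CAMIF, parks every
 * write master on scratch buffers and releases hardware, buffer-manager
 * and AVTimer resources.
 */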
int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	long rc = 0;
	int wm;
	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
	enum cam_ahb_clk_client id;

	ISP_DBG("%s E open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
	mutex_lock(&vfe_dev->realtime_mutex);
	mutex_lock(&vfe_dev->core_mutex);

	/* Enable vfe clks to wake up from XO shutdown mode */
	if (vfe_dev->pdev->id == 0)
		id = CAM_AHB_CLIENT_VFE0;
	else
		id = CAM_AHB_CLIENT_VFE1;
	if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE) < 0)
		pr_err("%s: failed to vote for AHB\n", __func__);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 1);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 1);

	if (!vfe_dev->vfe_open_cnt) {
		pr_err("%s invalid state open cnt %d\n", __func__,
			vfe_dev->vfe_open_cnt);
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EINVAL;
	}

	if (vfe_dev->vfe_open_cnt > 1) {
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return 0;
	}

	/* Unregister page fault handler */
	cam_smmu_reg_client_page_fault_handler(
		vfe_dev->buf_mgr->iommu_hdl,
		NULL, vfe_dev);

	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
	if (rc <= 0)
		pr_err("%s: halt timeout rc=%ld\n", __func__, rc);

	vfe_dev->hw_info->vfe_ops.core_ops.
		update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
	vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 0);

	/* after regular hw stop, reduce open cnt */
	vfe_dev->vfe_open_cnt--;

	/* put scratch buf in all the wm */
	for (wm = 0; wm < vfe_dev->axi_data.hw_info->num_wm; wm++) {
		msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PING_FLAG);
		msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PONG_FLAG);
	}
	vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
	vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
	if (vfe_dev->vt_enable) {
		msm_isp_end_avtimer();
		vfe_dev->vt_enable = 0;
	}
	vfe_dev->is_split = 0;

	mutex_unlock(&vfe_dev->core_mutex);
	mutex_unlock(&vfe_dev->realtime_mutex);
	return 0;
}
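
/*
 * msm_isp_flush_tasklet - discard every queued-but-unprocessed tasklet
 * command (e.g. around a reset) so stale status words are never handled.
 */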
void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
{
	unsigned long flags;
	struct msm_vfe_tasklet_queue_cmd *queue_cmd;

	spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
	while (atomic_read(&vfe_dev->irq_cnt)) {
		queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
			struct msm_vfe_tasklet_queue_cmd, list);
		if (!queue_cmd) {
			atomic_set(&vfe_dev->irq_cnt, 0);
			break;
		}
		atomic_sub(1, &vfe_dev->irq_cnt);
		list_del(&queue_cmd->list);
		queue_cmd->cmd_used = 0;
	}
	spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
}
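
/*
 * msm_isp_save_framedrop_values - for every ACTIVE stream fed by
 * @frame_src, latch requested_framedrop_period into
 * activated_framedrop_period, recording that the hardware has picked up
 * the new framedrop pattern.
 */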
void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
	enum msm_vfe_input_src frame_src)
{
	struct msm_vfe_axi_stream *stream_info = NULL;
	uint32_t j = 0;
	unsigned long flags;

	for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
		stream_info = &vfe_dev->axi_data.stream_info[j];
		if (stream_info->state != ACTIVE)
			continue;
		if (frame_src != SRC_TO_INTF(stream_info->stream_src))
			continue;

		spin_lock_irqsave(&stream_info->lock, flags);
		stream_info->activated_framedrop_period =
			stream_info->requested_framedrop_period;
		spin_unlock_irqrestore(&stream_info->lock, flags);
	}
}
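
/*
 * msm_isp_dump_irq_debug - walk the dump_data ring oldest-first: once
 * the ring has wrapped (fill_count above the ring size) the oldest
 * entry sits at 'first', otherwise entries start at index 0.
 */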
static void msm_isp_dump_irq_debug(void)
{
	uint32_t index, count, i;

	if (dump_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
		index = dump_data.first;
		count = MAX_ISP_PING_PONG_DUMP_SIZE;
	} else {
		index = 0;
		count = dump_data.first;
	}
	for (i = 0; i < count; i++) {
		pr_err("%s: trace_msm_cam_ping_pong_debug_dump needs re-implementation\n",
			__func__);
		index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
	}
}
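
/*
 * msm_isp_dump_taskelet_debug - same ring walk as
 * msm_isp_dump_irq_debug, but over the tasklet-side snapshots in
 * tasklet_data.
 */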
void msm_isp_dump_taskelet_debug(void)
{
	uint32_t index, count, i;

	if (tasklet_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
		index = tasklet_data.first;
		count = MAX_ISP_PING_PONG_DUMP_SIZE;
	} else {
		index = 0;
		count = tasklet_data.first;
	}
	for (i = 0; i < count; i++) {
		pr_err("%s: trace_msm_cam_tasklet_debug_dump needs re-implementation\n",
			__func__);
		index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
	}
}
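
/*
 * msm_isp_dump_ping_pong_mismatch - on a ping/pong mismatch, quiesce
 * both VFEs' interrupts and replay the IRQ- and tasklet-side history
 * through the msm_cam trace stream for post-mortem analysis.
 */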
void msm_isp_dump_ping_pong_mismatch(void)
{
	int i;

	spin_lock(&dump_tasklet_lock);
	for (i = 0; i < MAX_VFE; i++) {
		/* assumed axi op: mask further IRQs from each VFE */
		dump_data.vfe_dev->hw_info->vfe_ops.axi_ops.
			clear_irq_mask(
			dump_data.vfe_dev->common_data->dual_vfe_res->
				vfe_dev[i]);
		/* let any in-flight handler for that VFE finish */
		synchronize_irq(
			(uint32_t)dump_data.vfe_dev->common_data->
				dual_vfe_res->vfe_dev[i]->vfe_irq->start);
	}
	trace_msm_cam_string(" ***** msm_isp_dump_irq_debug ****");
	msm_isp_dump_irq_debug();
	trace_msm_cam_string(" ***** msm_isp_dump_taskelet_debug ****");
	msm_isp_dump_taskelet_debug();
	spin_unlock(&dump_tasklet_lock);
}