1 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
13 #include <linux/mutex.h>
15 #include <media/v4l2-subdev.h>
16 #include <linux/ratelimit.h>
19 #include "msm_isp_util.h"
20 #include "msm_isp_axi_util.h"
21 #include "msm_isp_stats_util.h"
22 #include "msm_camera_io_util.h"
23 #include "cam_smmu_api.h"
24 #define CREATE_TRACE_POINTS
25 #include "trace/events/msm_cam.h"
/* File-scope tunables and shared state for the ISP utility layer. */
27 #define MAX_ISP_V4l2_EVENTS 100
28 #define MAX_ISP_REG_LIST 100
29 #define MAX_ISP_CMD_NUM 10
30 #define MAX_ISP_CMD_LEN 4096
/* bandwidth_mgr_mutex serializes every access to isp_bandwidth_mgr below. */
31 static DEFINE_MUTEX(bandwidth_mgr_mutex);
32 static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
/* Last CPP clock rate recorded via msm_isp_util_update_clk_rate(). */
34 static uint64_t msm_isp_cpp_clk_rate;
/* Ping/pong dump snapshots, guarded by the spinlocks directly below
 * (pairing inferred from names — NOTE(review): confirm against elided users).
 */
35 static struct dump_ping_pong_state dump_data;
36 static struct dump_ping_pong_state tasklet_data;
37 static DEFINE_SPINLOCK(dump_irq_lock);
38 static DEFINE_SPINLOCK(dump_tasklet_lock);
40 #define VFE40_8974V2_VERSION 0x1001001A
/*
 * msm_isp_print_fourcc_error() - log an invalid fourcc output format.
 * @origin:        caller tag prepended to the error message.
 * @fourcc_format: packed four-character code to decode.
 *
 * Unpacks the fourcc byte-by-byte (LSB first); if any byte falls outside
 * the printable '0'..'z' range the code is logged numerically, otherwise
 * (in elided code below) it is logged as a string.
 */
42 void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
48 for (i = 0; i < 4; i++) {
49 text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
/* Cheap printability check, not strict: also admits punctuation between '0' and 'z'. */
50 if ((text[i] < '0') || (text[i] > 'z')) {
51 pr_err("%s: Invalid output format %d (unprintable)\n",
52 origin, fourcc_format);
56 pr_err("%s: Invalid output format %s\n",
/*
 * msm_isp_init_bandwidth_mgr() - register an ISP hardware client with the
 * shared bandwidth manager.
 * @vfe_dev: VFE device used to install the bus-client ops; may be NULL,
 *           in which case no bus client is (re)initialized.
 * @client:  hardware client slot (VFE0/VFE1/CPP).
 *
 * Under bandwidth_mgr_mutex: a second init of an already-active client is
 * a no-op (early unlock/return in elided code). On the first bus client,
 * init_bw_mgr() is called and the update/deinit callbacks are cached; on
 * failure (elided branch) the use count and active flag are rolled back.
 */
60 int msm_isp_init_bandwidth_mgr(struct vfe_device *vfe_dev,
61 enum msm_isp_hw_client client)
65 mutex_lock(&bandwidth_mgr_mutex);
66 if (isp_bandwidth_mgr.client_info[client].active) {
67 mutex_unlock(&bandwidth_mgr_mutex);
70 isp_bandwidth_mgr.client_info[client].active = 1;
71 isp_bandwidth_mgr.use_count++;
72 if (vfe_dev && !isp_bandwidth_mgr.bus_client) {
73 rc = vfe_dev->hw_info->vfe_ops.platform_ops.init_bw_mgr(vfe_dev,
/* Cache platform ops so update/deinit work without a vfe_dev later. */
76 isp_bandwidth_mgr.update_bw =
77 vfe_dev->hw_info->vfe_ops.platform_ops.update_bw;
78 isp_bandwidth_mgr.deinit_bw_mgr =
79 vfe_dev->hw_info->vfe_ops.platform_ops.deinit_bw_mgr;
/* Error rollback path (condition elided): undo the bookkeeping above. */
83 isp_bandwidth_mgr.use_count--;
84 isp_bandwidth_mgr.client_info[client].active = 0;
87 mutex_unlock(&bandwidth_mgr_mutex);
/*
 * msm_isp_update_bandwidth() - set a client's ab/ib bus-bandwidth votes and
 * push the aggregate to the bus driver.
 * @client: hardware client slot whose vote is being replaced.
 * @ab:     average bandwidth vote.
 * @ib:     instantaneous bandwidth vote.
 *
 * Fails (error return in elided code) when the manager has no users or no
 * registered bus client. Serialized by bandwidth_mgr_mutex.
 */
91 int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
92 uint64_t ab, uint64_t ib)
96 mutex_lock(&bandwidth_mgr_mutex);
97 if (!isp_bandwidth_mgr.use_count ||
98 !isp_bandwidth_mgr.bus_client) {
99 pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
100 __func__, isp_bandwidth_mgr.use_count,
101 isp_bandwidth_mgr.bus_client);
102 mutex_unlock(&bandwidth_mgr_mutex);
106 isp_bandwidth_mgr.client_info[client].ab = ab;
107 isp_bandwidth_mgr.client_info[client].ib = ib;
/* update_bw() aggregates all clients' votes; installed at init time. */
108 rc = isp_bandwidth_mgr.update_bw(&isp_bandwidth_mgr);
109 mutex_unlock(&bandwidth_mgr_mutex);
/*
 * msm_isp_deinit_bandwidth_mgr() - drop a client's registration and, when the
 * last user goes away, tear down the bus client.
 * @client: hardware client slot; out-of-range ids are rejected up front.
 *
 * Clears the client's info unconditionally, then decrements use_count; only
 * the final user reaches deinit_bw_mgr() (intermediate users unlock and
 * return early in the elided branch).
 */
113 void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
115 if (client >= MAX_ISP_CLIENT) {
116 pr_err("invalid Client id %d", client);
119 mutex_lock(&bandwidth_mgr_mutex);
120 memset(&isp_bandwidth_mgr.client_info[client], 0,
121 sizeof(struct msm_isp_bandwidth_info));
122 if (isp_bandwidth_mgr.use_count) {
123 isp_bandwidth_mgr.use_count--;
/* Other clients still registered: nothing more to tear down. */
124 if (isp_bandwidth_mgr.use_count) {
125 mutex_unlock(&bandwidth_mgr_mutex);
129 if (!isp_bandwidth_mgr.bus_client) {
130 pr_err("%s:%d error: bus client invalid\n",
132 mutex_unlock(&bandwidth_mgr_mutex);
/* Last user: release the bus client via the cached platform op. */
136 isp_bandwidth_mgr.deinit_bw_mgr(
139 mutex_unlock(&bandwidth_mgr_mutex);
/*
 * msm_isp_util_get_bandwidth_stats() - snapshot bandwidth/clock state into a
 * userspace-visible stats structure.
 * @vfe_dev: device supplying the last-overflow votes and VFE clock rate.
 * @stats:   destination; filled field-by-field, no locking taken here, so
 *           values may be mutually inconsistent under concurrent updates.
 */
142 void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
143 struct msm_isp_statistics *stats)
145 stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
146 stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
147 stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
149 stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
150 stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
151 stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
153 stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
154 stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
155 stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
156 stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
157 stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
158 stats->vfe_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
159 stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
/*
 * msm_isp_util_update_clk_rate() - record the current CPP clock rate so it
 * can be reported by msm_isp_util_get_bandwidth_stats(). Unsynchronized
 * single-word store.
 */
162 void msm_isp_util_update_clk_rate(long clock_rate)
164 msm_isp_cpp_clk_rate = clock_rate;
/*
 * msm_isp_get_framedrop_period() - map a frame-skip pattern enum to its
 * frame-drop period. Only the default arm is visible here; the elided case
 * labels presumably return fixed periods for the named patterns —
 * NOTE(review): confirm against the full switch.
 */
167 uint32_t msm_isp_get_framedrop_period(
168 enum msm_vfe_frame_skip_pattern frame_skip_pattern)
170 switch (frame_skip_pattern) {
179 return frame_skip_pattern + 1;
/*
 * msm_isp_get_timestamp() - fill an ISP timestamp record.
 * @time_stamp: out; event_time is wall-clock, buf_time is either the
 *              AVTimer time (when vt_enable is set) or monotonic boottime.
 * @vfe_dev:    device whose vt_enable flag selects the buf_time source.
 */
192 void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
193 struct vfe_device *vfe_dev)
197 do_gettimeofday(&(time_stamp->event_time));
198 if (vfe_dev->vt_enable) {
/* AVTimer path: buf_time mirrors the video-timer timestamp. */
199 msm_isp_get_avtimer_ts(time_stamp);
200 time_stamp->buf_time.tv_sec = time_stamp->vt_time.tv_sec;
201 time_stamp->buf_time.tv_usec = time_stamp->vt_time.tv_usec;
/* Default path (else branch elided): monotonic boottime, ns -> us. */
203 get_monotonic_boottime(&ts);
204 time_stamp->buf_time.tv_sec = ts.tv_sec;
205 time_stamp->buf_time.tv_usec = ts.tv_nsec/1000;
/*
 * msm_isp_evt_mask_to_isp_event() - translate a subscription-mask bit index
 * into the corresponding ISP_EVENT_* id. Unknown indices map to
 * ISP_EVENT_SUBS_MASK_NONE (also the initial value). The switch header and
 * per-case breaks are elided in this listing.
 */
210 static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
212 u32 evt_id = ISP_EVENT_SUBS_MASK_NONE;
215 case ISP_EVENT_MASK_INDEX_STATS_NOTIFY:
216 evt_id = ISP_EVENT_STATS_NOTIFY;
218 case ISP_EVENT_MASK_INDEX_ERROR:
219 evt_id = ISP_EVENT_ERROR;
221 case ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT:
222 evt_id = ISP_EVENT_IOMMU_P_FAULT;
224 case ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE:
225 evt_id = ISP_EVENT_STREAM_UPDATE_DONE;
227 case ISP_EVENT_MASK_INDEX_REG_UPDATE:
228 evt_id = ISP_EVENT_REG_UPDATE;
230 case ISP_EVENT_MASK_INDEX_SOF:
231 evt_id = ISP_EVENT_SOF;
233 case ISP_EVENT_MASK_INDEX_BUF_DIVERT:
234 evt_id = ISP_EVENT_BUF_DIVERT;
236 case ISP_EVENT_MASK_INDEX_BUF_DONE:
237 evt_id = ISP_EVENT_BUF_DONE;
239 case ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY:
240 evt_id = ISP_EVENT_COMP_STATS_NOTIFY;
242 case ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE:
243 evt_id = ISP_EVENT_FE_READ_DONE;
245 case ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH:
246 evt_id = ISP_EVENT_PING_PONG_MISMATCH;
248 case ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING:
249 evt_id = ISP_EVENT_REG_UPDATE_MISSING;
251 case ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR:
252 evt_id = ISP_EVENT_BUF_FATAL_ERROR;
/* default: unrecognized index. */
255 evt_id = ISP_EVENT_SUBS_MASK_NONE;
/*
 * msm_isp_subscribe_event_mask() - (un)subscribe one event class on a v4l2
 * file handle, fanning out over sub-ids where the class has them.
 * @fh:             v4l2 file handle owning the subscription.
 * @sub:            subscription record; sub->type is rewritten per iteration.
 * @evt_mask_index: which ISP_EVENT_MASK_INDEX_* class is being handled.
 * @evt_id:         base ISP_EVENT_* id for that class.
 * @subscribe_flag: true = subscribe, false = unsubscribe.
 *
 * STATS_NOTIFY fans out one event per stats type; SOF/REG_UPDATE/
 * STREAM_UPDATE_DONE fan out one per input interface; everything else is a
 * single (un)subscribe. Error logging on failure; loop breaks are elided.
 */
262 static inline int msm_isp_subscribe_event_mask(struct v4l2_fh *fh,
263 struct v4l2_event_subscription *sub, int evt_mask_index,
264 u32 evt_id, bool subscribe_flag)
266 int rc = 0, i, interface;
268 if (evt_mask_index == ISP_EVENT_MASK_INDEX_STATS_NOTIFY) {
269 for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
/* One event id per stats type: base id + stats index. */
270 sub->type = evt_id + i;
272 rc = v4l2_event_subscribe(fh, sub,
273 MAX_ISP_V4l2_EVENTS, NULL);
275 rc = v4l2_event_unsubscribe(fh, sub);
277 pr_err("%s: Subs event_type =0x%x failed\n",
278 __func__, sub->type);
282 } else if (evt_mask_index == ISP_EVENT_MASK_INDEX_SOF ||
283 evt_mask_index == ISP_EVENT_MASK_INDEX_REG_UPDATE ||
284 evt_mask_index == ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE) {
285 for (interface = 0; interface < VFE_SRC_MAX; interface++) {
/* Interface index is OR'ed into the low bits of the event id. */
286 sub->type = evt_id | interface;
288 rc = v4l2_event_subscribe(fh, sub,
289 MAX_ISP_V4l2_EVENTS, NULL);
291 rc = v4l2_event_unsubscribe(fh, sub);
293 pr_err("%s: Subs event_type =0x%x failed\n",
294 __func__, sub->type);
/* Remaining classes: single event, no fan-out. */
301 rc = v4l2_event_subscribe(fh, sub,
302 MAX_ISP_V4l2_EVENTS, NULL);
304 rc = v4l2_event_unsubscribe(fh, sub);
306 pr_err("%s: Subs event_type =0x%x failed\n",
307 __func__, sub->type);
/*
 * msm_isp_process_event_subscription() - walk every bit of a subscription
 * mask and (un)subscribe the corresponding event class.
 * @fh:             v4l2 file handle.
 * @sub:            subscription whose ->type holds the bit mask on entry.
 * @subscribe_flag: true = subscribe, false = unsubscribe.
 *
 * Rejects an empty mask, then for each set bit in the STATS_NOTIFY..
 * BUF_FATAL_ERROR index range delegates to msm_isp_subscribe_event_mask().
 */
314 static inline int msm_isp_process_event_subscription(struct v4l2_fh *fh,
315 struct v4l2_event_subscription *sub, bool subscribe_flag)
317 int rc = 0, evt_mask_index = 0;
318 u32 evt_mask = sub->type;
321 if (evt_mask == ISP_EVENT_SUBS_MASK_NONE) {
322 pr_err("%s: Subs event_type is None=0x%x\n",
327 for (evt_mask_index = ISP_EVENT_MASK_INDEX_STATS_NOTIFY;
328 evt_mask_index <= ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR;
330 if (evt_mask & (1<<evt_mask_index)) {
331 evt_id = msm_isp_evt_mask_to_isp_event(evt_mask_index);
332 rc = msm_isp_subscribe_event_mask(fh, sub,
333 evt_mask_index, evt_id, subscribe_flag);
335 pr_err("%s: Subs event index:%d failed\n",
336 __func__, evt_mask_index);
/* v4l2 subdev subscribe hook: thin wrapper, subscribe_flag = true. */
344 int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
345 struct v4l2_event_subscription *sub)
347 return msm_isp_process_event_subscription(fh, sub, true);
/* v4l2 subdev unsubscribe hook: thin wrapper, subscribe_flag = false. */
350 int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
351 struct v4l2_event_subscription *sub)
353 return msm_isp_process_event_subscription(fh, sub, false);
/*
 * msm_isp_start_fetch_engine() - kick the offline (memory-input) fetch
 * engine. Seeds the PIX0 frame id from the request (HAL expects the same
 * frame id it passed in do_reprocess), then defers to the hw-specific
 * start_fetch_eng op.
 */
356 static int msm_isp_start_fetch_engine(struct vfe_device *vfe_dev,
359 struct msm_vfe_fetch_eng_start *fe_cfg = arg;
361 * For Offline VFE, HAL expects same frame id
362 * for offline output which it requested in do_reprocess.
364 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
366 return vfe_dev->hw_info->vfe_ops.core_ops.
367 start_fetch_eng(vfe_dev, arg);
/*
 * msm_isp_start_fetch_engine_multi_pass() - start the offline fetch engine
 * for multi-pass reprocessing.
 *
 * Seeds the PIX0 frame id as in the single-pass path. For the second pass
 * it additionally: looks up the output stream, resets the hardware and the
 * stream's framedrop pattern, programs the PING buffer address for the
 * requested output buffer (under buf_mgr->lock), re-enables each plane's
 * write master, issues a reg update and a WM reload — then starts the
 * engine via the hw-specific multi-pass op.
 */
370 static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
373 struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
374 struct msm_vfe_axi_stream *stream_info = NULL;
376 uint32_t wm_reload_mask = 0;
378 * For Offline VFE, HAL expects same frame id
379 * for offline output which it requested in do_reprocess.
381 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
384 if (fe_cfg->offline_pass == OFFLINE_SECOND_PASS) {
385 stream_info = &vfe_dev->axi_data.stream_info[
386 HANDLE_TO_IDX(fe_cfg->output_stream_id)];
388 pr_err("%s: Couldn't find streamid 0x%X\n", __func__,
389 fe_cfg->output_stream_id);
392 vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
394 msm_isp_reset_framedrop(vfe_dev, stream_info);
/* buf_mgr->lock guards the ping/pong address programming. */
395 mutex_lock(&vfe_dev->buf_mgr->lock);
396 rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
397 VFE_PING_FLAG, fe_cfg->output_buf_idx);
398 mutex_unlock(&vfe_dev->buf_mgr->lock);
400 pr_err("%s: Fetch engine config failed\n", __func__);
/* Re-arm every write master for this stream and note it for reload. */
403 for (i = 0; i < stream_info->num_planes; i++) {
404 vfe_dev->hw_info->vfe_ops.axi_ops.
405 enable_wm(vfe_dev->vfe_base, stream_info->wm[i],
407 wm_reload_mask |= (1 << stream_info->wm[i]);
409 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
411 vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
412 vfe_dev->vfe_base, wm_reload_mask);
414 return vfe_dev->hw_info->vfe_ops.core_ops.
415 start_fetch_eng_multi_pass(vfe_dev, arg);
/*
 * msm_isp_fetch_engine_done_notify() - publish an ISP_EVENT_FE_READ_DONE
 * event for a completed fetch-engine read.
 * @vfe_dev:           device the event is sent from; supplies the frame id.
 * @fetch_engine_info: completed request; its fields are copied into the
 *                     event payload and its is_busy flag is cleared before
 *                     the event is sent.
 */
418 void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
419 struct msm_vfe_fetch_engine_info *fetch_engine_info)
421 struct msm_isp_event_data fe_rd_done_event;
423 memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data));
424 fe_rd_done_event.frame_id =
425 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
426 fe_rd_done_event.u.fetch_done.session_id =
427 fetch_engine_info->session_id;
428 fe_rd_done_event.u.fetch_done.stream_id = fetch_engine_info->stream_id;
429 fe_rd_done_event.u.fetch_done.handle = fetch_engine_info->bufq_handle;
430 fe_rd_done_event.u.fetch_done.buf_idx = fetch_engine_info->buf_idx;
431 fe_rd_done_event.u.fetch_done.fd = fetch_engine_info->fd;
432 fe_rd_done_event.u.fetch_done.offline_mode =
433 fetch_engine_info->offline_mode;
435 ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
436 __func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
/* Mark the engine free before notifying userspace. */
437 fetch_engine_info->is_busy = 0;
438 msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
/*
 * msm_isp_cfg_pix() - configure the pixel (PIX0) input path.
 *
 * Rejects reconfiguration while the pixel path is active. Copies the mux,
 * format and pixel clock from the request into src_info[VFE_PIX_0], derives
 * the interface width from the mux type (CAMIF/TESTGEN: pixels per line;
 * EXTERNAL_READ: fetch-engine buffer stride), optionally overrides the SOF
 * counter step from the CAMIF subsample config, then programs the input mux
 * and issues a reg update via the hw ops.
 */
441 static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
442 struct msm_vfe_input_cfg *input_cfg)
445 struct msm_vfe_pix_cfg *pix_cfg = NULL;
447 pr_debug("%s: entry\n", __func__);
449 if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
450 pr_err("%s: pixel path is active\n", __func__);
454 pix_cfg = &input_cfg->d.pix_cfg;
456 vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
457 input_cfg->input_pix_clk;
458 vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
459 input_cfg->d.pix_cfg.input_mux;
460 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
461 input_cfg->d.pix_cfg.input_format;
/* Default: one SOF per frame; may be overridden by subsample cfg below. */
462 vfe_dev->axi_data.src_info[VFE_PIX_0].sof_counter_step = 1;
465 * Fill pixel_clock into input_pix_clk so that user space
466 * can use rounded clk rate
468 input_cfg->input_pix_clk =
469 vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock;
471 ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
472 input_cfg->d.pix_cfg.input_mux, CAMIF,
473 input_cfg->d.pix_cfg.input_format);
475 if (input_cfg->d.pix_cfg.input_mux == CAMIF ||
476 input_cfg->d.pix_cfg.input_mux == TESTGEN) {
477 vfe_dev->axi_data.src_info[VFE_PIX_0].width =
478 input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
479 if (input_cfg->d.pix_cfg.camif_cfg.subsample_cfg.
480 sof_counter_step > 0) {
481 vfe_dev->axi_data.src_info[VFE_PIX_0].
482 sof_counter_step = input_cfg->d.pix_cfg.
483 camif_cfg.subsample_cfg.sof_counter_step;
485 } else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
/* Offline fetch: width comes from the buffer stride. */
486 vfe_dev->axi_data.src_info[VFE_PIX_0].width =
487 input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
489 vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
490 vfe_dev, &input_cfg->d.pix_cfg);
491 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_PIX_0);
493 pr_debug("%s: exit\n", __func__);
/*
 * msm_isp_cfg_rdi() - configure a raw-data (RDI) input path.
 *
 * Rejects reconfiguration while the requested raw path is active; otherwise
 * records the pixel clock and programs the RDI registers via the hw op.
 */
498 static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
499 struct msm_vfe_input_cfg *input_cfg)
503 if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
504 pr_err("%s: RAW%d path is active\n", __func__,
505 input_cfg->input_src - VFE_RAW_0);
509 vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
510 input_cfg->input_pix_clk;
511 vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
512 vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
/*
 * msm_isp_cfg_input() - dispatch an input-config request to the pixel or
 * RDI path by input_src, then set the VFE clock rate from the requested
 * pixel clock. Invalid sources are rejected; case labels are elided in
 * this listing.
 */
516 static int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
519 struct msm_vfe_input_cfg *input_cfg = arg;
520 long pixel_clock = 0;
522 switch (input_cfg->input_src) {
524 rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
529 rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
532 pr_err("%s: Invalid input source\n", __func__);
536 pixel_clock = input_cfg->input_pix_clk;
537 rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
540 pr_err("%s: clock set rate failed\n", __func__);
/*
 * msm_isp_camif_cfg() - configure CAMIF from userspace, reusing the pixel
 * path configuration code.
 *
 * Builds a synthetic msm_vfe_input_cfg around the supplied camif_cfg with a
 * fixed 320 MHz pixel clock, pulls input_mux and camif_input from the
 * previously stored operation cfg, runs msm_isp_cfg_pix(), then applies the
 * clock rate.
 */
546 static int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg)
549 struct msm_vfe_camif_cfg *camif_cfg = arg;
550 struct msm_vfe_input_cfg input_cfg;
551 long pixel_clock = 0;
553 pr_debug("%s: entry\n", __func__);
555 memset(&input_cfg, 0, sizeof(input_cfg));
557 input_cfg.input_src = VFE_PIX_0;
/* Fixed default pixel clock for the CAMIF path (320 MHz). */
558 input_cfg.input_pix_clk = 320000000;
559 input_cfg.d.pix_cfg.camif_cfg = *camif_cfg;
561 /* populate values from operation cfg */
562 input_cfg.d.pix_cfg.input_mux =
563 vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux;
564 input_cfg.d.pix_cfg.camif_cfg.camif_input =
565 vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input;
567 rc = msm_isp_cfg_pix(vfe_dev, &input_cfg);
569 pixel_clock = input_cfg.input_pix_clk;
570 rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
573 pr_err("%s: clock set rate failed\n", __func__);
577 pr_debug("%s: exit\n", __func__);
/*
 * msm_isp_operation_cfg() - store operation-level parameters (HVX command,
 * input mux, pixel pattern, CAMIF input) on the device for later use by
 * input/CAMIF configuration. Resets is_split to false.
 */
583 static int msm_isp_operation_cfg(struct vfe_device *vfe_dev, void *arg)
585 struct msm_vfe_operation_cfg *op_cfg = arg;
587 pr_debug("%s: entry\n", __func__);
589 vfe_dev->hvx_cmd = op_cfg->hvx_cmd;
590 vfe_dev->is_split = 0; /* default to false */
592 /* yuv_cosited currently not used */
593 /* pixel input select not used */
595 vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
597 vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_pattern =
598 op_cfg->pixel_pattern;
599 vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input = op_cfg->camif_input;
601 pr_debug("%s: exit\n", __func__);
/*
 * msm_isp_set_dual_HW_master_slave_mode() - put this VFE into dual-hardware
 * master/slave operation.
 *
 * Validates inputs, marks the shared ms_resource as master/slave type and
 * forces equal UB slicing. The primary interface gets full setup: a master
 * records its sof_info and SOF-delta threshold directly; a slave (under
 * common_dev_data_lock, irqsave) claims the first free slot in
 * reserved_slave_mask, bumps num_slave, and records its per-slave sof_info
 * and slave id — failing if all MS_NUM_SLAVE_MAX slots are taken. The
 * remaining interfaces listed in num_src/input_src are only tagged with the
 * dual-hw type for frame-id sync; no sof_info is allocated for them.
 * Serialization relies on the ioctl-level mutex (see comment at 611-613).
 */
607 static int msm_isp_set_dual_HW_master_slave_mode(
608 struct vfe_device *vfe_dev, void *arg)
611 * This method assumes no 2 processes are accessing it simultaneously.
612 * Currently this is guaranteed by mutex lock in ioctl.
613 * If that changes, need to revisit this
616 struct msm_isp_set_dual_hw_ms_cmd *dual_hw_ms_cmd = NULL;
617 struct msm_vfe_src_info *src_info = NULL;
620 if (!vfe_dev || !arg) {
621 pr_err("%s: Error! Invalid input vfe_dev %pK arg %pK\n",
622 __func__, vfe_dev, arg);
626 dual_hw_ms_cmd = (struct msm_isp_set_dual_hw_ms_cmd *)arg;
627 vfe_dev->common_data->ms_resource.dual_hw_type = DUAL_HW_MASTER_SLAVE;
628 vfe_dev->vfe_ub_policy = MSM_WM_UB_EQUAL_SLICING;
629 if (dual_hw_ms_cmd->primary_intf < VFE_SRC_MAX) {
630 ISP_DBG("%s: vfe %d primary_intf %d\n", __func__,
631 vfe_dev->pdev->id, dual_hw_ms_cmd->primary_intf);
632 src_info = &vfe_dev->axi_data.
633 src_info[dual_hw_ms_cmd->primary_intf];
634 src_info->dual_hw_ms_info.dual_hw_ms_type =
635 dual_hw_ms_cmd->dual_hw_ms_type;
638 /* No lock needed here since ioctl lock protects 2 session from race */
639 if (src_info != NULL &&
640 dual_hw_ms_cmd->dual_hw_ms_type == MS_TYPE_MASTER) {
641 src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
642 ISP_DBG("%s: vfe %d Master\n", __func__, vfe_dev->pdev->id);
644 src_info->dual_hw_ms_info.sof_info =
645 &vfe_dev->common_data->ms_resource.master_sof_info;
646 vfe_dev->common_data->ms_resource.sof_delta_threshold =
647 dual_hw_ms_cmd->sof_delta_threshold;
648 } else if (src_info != NULL) {
/* Slave path: slot allocation races with IRQ context, hence irqsave. */
650 &vfe_dev->common_data->common_dev_data_lock,
652 src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
653 ISP_DBG("%s: vfe %d Slave\n", __func__, vfe_dev->pdev->id);
655 for (j = 0; j < MS_NUM_SLAVE_MAX; j++) {
656 if (vfe_dev->common_data->ms_resource.
657 reserved_slave_mask & (1 << j))
660 vfe_dev->common_data->ms_resource.reserved_slave_mask |=
662 vfe_dev->common_data->ms_resource.num_slave++;
663 src_info->dual_hw_ms_info.sof_info =
664 &vfe_dev->common_data->ms_resource.
666 src_info->dual_hw_ms_info.slave_id = j;
667 ISP_DBG("%s: Slave id %d\n", __func__, j);
670 spin_unlock_irqrestore(
671 &vfe_dev->common_data->common_dev_data_lock,
674 if (j == MS_NUM_SLAVE_MAX) {
675 pr_err("%s: Error! Cannot find free aux resource\n",
680 ISP_DBG("%s: vfe %d num_src %d\n", __func__, vfe_dev->pdev->id,
681 dual_hw_ms_cmd->num_src);
682 if (dual_hw_ms_cmd->num_src > VFE_SRC_MAX) {
683 pr_err("%s: Error! Invalid num_src %d\n", __func__,
684 dual_hw_ms_cmd->num_src);
687 /* This for loop is for non-primary intf to be marked with Master/Slave
688 * in order for frame id sync. But their timestamp is not saved.
689 * So no sof_info resource is allocated
691 for (i = 0; i < dual_hw_ms_cmd->num_src; i++) {
692 if (dual_hw_ms_cmd->input_src[i] >= VFE_SRC_MAX) {
693 pr_err("%s: Error! Invalid SRC param %d\n", __func__,
694 dual_hw_ms_cmd->input_src[i]);
697 ISP_DBG("%s: vfe %d src %d type %d\n", __func__,
698 vfe_dev->pdev->id, dual_hw_ms_cmd->input_src[i],
699 dual_hw_ms_cmd->dual_hw_ms_type);
700 src_info = &vfe_dev->axi_data.
701 src_info[dual_hw_ms_cmd->input_src[i]];
702 src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
703 src_info->dual_hw_ms_info.dual_hw_ms_type =
704 dual_hw_ms_cmd->dual_hw_ms_type;
/*
 * msm_isp_proc_cmd_list_unlocked() - process a userspace-chained list of
 * register-config commands (native, non-compat layout).
 *
 * Executes the head command, then walks cmd.next, validating each link's
 * next_size against the struct size and bounding the walk at
 * MAX_ISP_REG_LIST links before copy_from_user'ing and executing the next
 * entry. The loop header and the head copy are elided in this listing.
 */
710 static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
714 struct msm_vfe_cfg_cmd_list *proc_cmd =
715 (struct msm_vfe_cfg_cmd_list *)arg;
716 struct msm_vfe_cfg_cmd_list cmd, cmd_next;
718 if (!vfe_dev || !arg) {
719 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
724 rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
726 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
/* Defend against a malformed/forged chain from userspace. */
731 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
732 pr_err("%s:%d failed: next size %u != expected %zu\n",
733 __func__, __LINE__, cmd.next_size,
734 sizeof(struct msm_vfe_cfg_cmd_list));
/* Cap chain length to prevent an unbounded/looping list. */
737 if (++count >= MAX_ISP_REG_LIST) {
738 pr_err("%s:%d Error exceeding the max register count:%u\n",
739 __func__, __LINE__, count);
743 if (copy_from_user(&cmd_next, (void __user *)cmd.next,
744 sizeof(struct msm_vfe_cfg_cmd_list))) {
749 rc = msm_isp_proc_cmd(vfe_dev, &cmd_next.cfg_cmd);
751 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
/*
 * 32-bit (compat) mirror of msm_vfe_cfg_cmd2 / msm_vfe_cfg_cmd_list:
 * user pointers are carried as compat_caddr_t so a 32-bit userspace can
 * talk to a 64-bit kernel. The matching compat ioctl numbers follow.
 */
759 struct msm_vfe_cfg_cmd2_32 {
762 compat_caddr_t cfg_data;
763 compat_caddr_t cfg_cmd;
766 struct msm_vfe_cfg_cmd_list_32 {
767 struct msm_vfe_cfg_cmd2_32 cfg_cmd;
772 #define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
773 _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
774 #define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
775 _IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
/*
 * msm_isp_compat_to_proc_cmd() - widen a 32-bit cfg command into the native
 * layout, converting compat_caddr_t fields to kernel pointers via
 * compat_ptr().
 */
777 static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
778 struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
780 proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
781 proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
782 proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
783 proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
/*
 * msm_isp_proc_cmd_list_compat() - compat (32-bit userspace) twin of
 * msm_isp_proc_cmd_list_unlocked(): converts each 32-bit entry with
 * msm_isp_compat_to_proc_cmd() before executing it, with the same
 * next_size validation and MAX_ISP_REG_LIST chain cap.
 *
 * NOTE(review): "¤t_cmd" below is mojibake — the source text
 * "&current_cmd" was mangled by HTML-entity decoding ("&curren" -> the
 * currency sign). Restore "&current_cmd" when repairing this listing.
 * NOTE(review): the size check at 808-811 compares against the 32-bit
 * struct but the error message prints sizeof the native struct — likely a
 * copy/paste slip in the log text only.
 */
786 static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
790 struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
791 (struct msm_vfe_cfg_cmd_list_32 *)arg;
792 struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
793 struct msm_vfe_cfg_cmd2 current_cmd;
795 if (!vfe_dev || !arg) {
796 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
800 msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
801 rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
803 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
807 while (compat_ptr(cmd.next) != NULL) {
808 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
809 pr_err("%s:%d failed: next size %u != expected %zu\n",
810 __func__, __LINE__, cmd.next_size,
811 sizeof(struct msm_vfe_cfg_cmd_list));
814 if (++count >= MAX_ISP_REG_LIST) {
815 pr_err("%s:%d Error exceeding the max register count:%u\n",
816 __func__, __LINE__, count);
820 if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
821 sizeof(struct msm_vfe_cfg_cmd_list_32))) {
826 msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
827 rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
829 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
/*
 * msm_isp_proc_cmd_list() - entry point for chained register-config lists.
 * With CONFIG_COMPAT, 32-bit callers are routed to the compat walker;
 * everything else (and the !CONFIG_COMPAT build) uses the native walker.
 */
836 static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
838 if (is_compat_task())
839 return msm_isp_proc_cmd_list_compat(vfe_dev, arg);
841 return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
843 #else /* CONFIG_COMPAT */
844 static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
846 return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
848 #endif /* CONFIG_COMPAT */
/*
 * msm_isp_ioctl_unlocked() - main VFE subdev ioctl dispatcher.
 *
 * Locking policy (see comment at 867-871): realtime_mutex for hard
 * real-time paths (register config), buf_mgr->lock for buffer-queue ops,
 * core_mutex for stream/statistics/engine control that may block on
 * hardware. Per-case break statements and most closing braces are elided
 * in this listing.
 */
850 static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
851 unsigned int cmd, void *arg)
856 struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
858 if (!vfe_dev || !vfe_dev->vfe_base) {
859 pr_err("%s:%d failed: invalid params %pK\n",
860 __func__, __LINE__, vfe_dev);
862 pr_err("%s:%d failed %pK\n", __func__,
863 __LINE__, vfe_dev->vfe_base);
867 /* use real time mutex for hard real-time ioctls such as
868 * buffer operations and register updates.
869 * Use core mutex for other ioctls that could take
870 * longer time to complete such as start/stop ISP streams
871 * which blocks until the hardware start/stop streaming
873 ISP_DBG("%s: cmd: %d\n", __func__, _IOC_TYPE(cmd));
875 case VIDIOC_MSM_VFE_REG_CFG: {
876 mutex_lock(&vfe_dev->realtime_mutex);
877 rc = msm_isp_proc_cmd(vfe_dev, arg);
878 mutex_unlock(&vfe_dev->realtime_mutex);
881 case VIDIOC_MSM_VFE_REG_LIST_CFG: {
882 mutex_lock(&vfe_dev->realtime_mutex);
883 rc = msm_isp_proc_cmd_list(vfe_dev, arg);
884 mutex_unlock(&vfe_dev->realtime_mutex);
/* Buffer-queue commands share one handler under buf_mgr->lock. */
887 case VIDIOC_MSM_ISP_REQUEST_BUFQ:
889 case VIDIOC_MSM_ISP_ENQUEUE_BUF:
891 case VIDIOC_MSM_ISP_DEQUEUE_BUF:
893 case VIDIOC_MSM_ISP_UNMAP_BUF: {
894 mutex_lock(&vfe_dev->buf_mgr->lock);
895 rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
896 mutex_unlock(&vfe_dev->buf_mgr->lock);
899 case VIDIOC_MSM_ISP_RELEASE_BUFQ: {
/* RELEASE_BUFQ additionally guards against a missing buf manager. */
900 if (vfe_dev->buf_mgr == NULL) {
901 pr_err("%s: buf mgr NULL! rc = -1\n", __func__);
905 mutex_lock(&vfe_dev->buf_mgr->lock);
906 rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
907 mutex_unlock(&vfe_dev->buf_mgr->lock);
910 case VIDIOC_MSM_ISP_REQUEST_STREAM:
911 mutex_lock(&vfe_dev->core_mutex);
912 rc = msm_isp_request_axi_stream(vfe_dev, arg);
913 mutex_unlock(&vfe_dev->core_mutex);
915 case VIDIOC_MSM_ISP_RELEASE_STREAM:
916 mutex_lock(&vfe_dev->core_mutex);
917 rc = msm_isp_release_axi_stream(vfe_dev, arg);
918 mutex_unlock(&vfe_dev->core_mutex);
920 case VIDIOC_MSM_ISP_CFG_STREAM:
/* Lock order: core_mutex outer, buf_mgr->lock inner (also below). */
921 mutex_lock(&vfe_dev->core_mutex);
922 mutex_lock(&vfe_dev->buf_mgr->lock);
923 rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
924 mutex_unlock(&vfe_dev->buf_mgr->lock);
925 mutex_unlock(&vfe_dev->core_mutex);
927 case VIDIOC_MSM_ISP_CFG_HW_STATE:
928 mutex_lock(&vfe_dev->core_mutex);
929 rc = msm_isp_update_stream_bandwidth(vfe_dev,
930 *(enum msm_vfe_hw_state *)arg);
931 mutex_unlock(&vfe_dev->core_mutex);
933 case VIDIOC_MSM_ISP_AXI_HALT:
934 mutex_lock(&vfe_dev->core_mutex);
935 rc = msm_isp_axi_halt(vfe_dev, arg);
936 mutex_unlock(&vfe_dev->core_mutex);
938 case VIDIOC_MSM_ISP_AXI_RESET:
939 mutex_lock(&vfe_dev->core_mutex);
/* Reset is refused while an overflow-recovery halt is in force. */
940 if (atomic_read(&vfe_dev->error_info.overflow_state)
942 rc = msm_isp_stats_reset(vfe_dev);
943 rc2 = msm_isp_axi_reset(vfe_dev, arg);
947 pr_err_ratelimited("%s: no HW reset, halt enforced.\n",
950 mutex_unlock(&vfe_dev->core_mutex);
952 case VIDIOC_MSM_ISP_AXI_RESTART:
953 mutex_lock(&vfe_dev->core_mutex);
954 mutex_lock(&vfe_dev->buf_mgr->lock);
955 if (atomic_read(&vfe_dev->error_info.overflow_state)
957 rc = msm_isp_stats_restart(vfe_dev);
958 rc2 = msm_isp_axi_restart(vfe_dev, arg);
962 pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
965 mutex_unlock(&vfe_dev->buf_mgr->lock);
966 mutex_unlock(&vfe_dev->core_mutex);
968 case VIDIOC_MSM_ISP_INPUT_CFG:
969 mutex_lock(&vfe_dev->core_mutex);
970 rc = msm_isp_cfg_input(vfe_dev, arg);
971 mutex_unlock(&vfe_dev->core_mutex);
973 case VIDIOC_MSM_ISP_AHB_CLK_CFG:
974 mutex_lock(&vfe_dev->core_mutex);
/* Optional hw op: only some VFE generations implement ahb_clk_cfg. */
975 if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
976 rc = vfe_dev->hw_info->vfe_ops.core_ops.
977 ahb_clk_cfg(vfe_dev, arg);
980 mutex_unlock(&vfe_dev->core_mutex);
982 case VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE:
983 mutex_lock(&vfe_dev->core_mutex);
984 rc = msm_isp_set_dual_HW_master_slave_mode(vfe_dev, arg);
985 mutex_unlock(&vfe_dev->core_mutex);
987 case VIDIOC_MSM_ISP_FETCH_ENG_START:
988 case VIDIOC_MSM_ISP_MAP_BUF_START_FE:
989 mutex_lock(&vfe_dev->core_mutex);
990 rc = msm_isp_start_fetch_engine(vfe_dev, arg);
991 mutex_unlock(&vfe_dev->core_mutex);
994 case VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START:
995 case VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE:
996 mutex_lock(&vfe_dev->core_mutex);
997 rc = msm_isp_start_fetch_engine_multi_pass(vfe_dev, arg);
998 mutex_unlock(&vfe_dev->core_mutex);
1000 case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
1002 enum msm_vfe_input_src frame_src =
1003 *((enum msm_vfe_input_src *)arg);
1004 vfe_dev->hw_info->vfe_ops.core_ops.
1005 reg_update(vfe_dev, frame_src);
1008 case VIDIOC_MSM_ISP_SET_SRC_STATE:
1009 mutex_lock(&vfe_dev->core_mutex);
1010 rc = msm_isp_set_src_state(vfe_dev, arg);
1011 mutex_unlock(&vfe_dev->core_mutex);
1013 case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
1014 mutex_lock(&vfe_dev->core_mutex);
1015 rc = msm_isp_request_stats_stream(vfe_dev, arg);
1016 mutex_unlock(&vfe_dev->core_mutex);
1018 case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
1019 mutex_lock(&vfe_dev->core_mutex);
1020 rc = msm_isp_release_stats_stream(vfe_dev, arg);
1021 mutex_unlock(&vfe_dev->core_mutex);
1023 case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
1024 mutex_lock(&vfe_dev->core_mutex);
1025 mutex_lock(&vfe_dev->buf_mgr->lock);
1026 rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
1027 mutex_unlock(&vfe_dev->buf_mgr->lock);
1028 mutex_unlock(&vfe_dev->core_mutex);
1030 case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
1031 mutex_lock(&vfe_dev->core_mutex);
1032 rc = msm_isp_update_stats_stream(vfe_dev, arg);
1033 mutex_unlock(&vfe_dev->core_mutex);
1035 case VIDIOC_MSM_ISP_UPDATE_STREAM:
1036 mutex_lock(&vfe_dev->core_mutex);
1037 rc = msm_isp_update_axi_stream(vfe_dev, arg);
1038 mutex_unlock(&vfe_dev->core_mutex);
1040 case VIDIOC_MSM_ISP_SMMU_ATTACH:
1041 mutex_lock(&vfe_dev->core_mutex);
1042 rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
1043 mutex_unlock(&vfe_dev->core_mutex);
1045 case VIDIOC_MSM_ISP_OPERATION_CFG:
1046 mutex_lock(&vfe_dev->core_mutex);
1047 msm_isp_operation_cfg(vfe_dev, arg);
1048 mutex_unlock(&vfe_dev->core_mutex);
1050 case VIDIOC_MSM_ISP_AXI_OUTPUT_CFG:
1051 mutex_lock(&vfe_dev->core_mutex);
1052 rc = msm_isp_axi_output_cfg(vfe_dev, arg);
1053 mutex_unlock(&vfe_dev->core_mutex);
1055 case VIDIOC_MSM_ISP_CAMIF_CFG:
1056 mutex_lock(&vfe_dev->core_mutex);
1057 rc = msm_isp_camif_cfg(vfe_dev, arg);
1058 mutex_unlock(&vfe_dev->core_mutex);
1060 case MSM_SD_NOTIFY_FREEZE:
/* Freeze diagnostics: re-arm the per-path SOF debug counters. */
1061 vfe_dev->isp_sof_debug = 0;
1062 vfe_dev->isp_raw0_debug = 0;
1063 vfe_dev->isp_raw1_debug = 0;
1064 vfe_dev->isp_raw2_debug = 0;
1066 case MSM_SD_UNNOTIFY_FREEZE:
1068 case MSM_SD_SHUTDOWN:
/* Force-close every open node on shutdown. */
1069 while (vfe_dev->vfe_open_cnt != 0)
1070 msm_isp_close_node(sd, NULL);
1072 case VIDIOC_MSM_ISP_SET_CLK_STATUS:
1073 spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
1074 vfe_dev->clk_enabled = *((unsigned int *)arg);
1075 spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
1079 pr_err_ratelimited("%s: Invalid ISP command %d\n", __func__,
1087 #ifdef CONFIG_COMPAT
/*
 * msm_isp_ioctl_compat() - handle the two compat-only register-config
 * ioctls (converting the 32-bit command layout first); everything else
 * falls through to msm_isp_ioctl_unlocked(). msm_isp_ioctl() below routes
 * through this wrapper when CONFIG_COMPAT is set, else directly to the
 * unlocked dispatcher.
 */
1088 static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
1089 unsigned int cmd, void *arg)
1091 struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
1094 if (!vfe_dev || !vfe_dev->vfe_base) {
1095 pr_err("%s:%d failed: invalid params %pK\n",
1096 __func__, __LINE__, vfe_dev);
1098 pr_err("%s:%d failed %pK\n", __func__,
1099 __LINE__, vfe_dev->vfe_base);
1104 case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
1105 struct msm_vfe_cfg_cmd2 proc_cmd;
1107 mutex_lock(&vfe_dev->realtime_mutex);
1108 msm_isp_compat_to_proc_cmd(&proc_cmd,
1109 (struct msm_vfe_cfg_cmd2_32 *) arg);
1110 rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
1111 mutex_unlock(&vfe_dev->realtime_mutex);
1114 case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
/* List conversion happens per-entry inside msm_isp_proc_cmd_list(). */
1115 mutex_lock(&vfe_dev->realtime_mutex);
1116 rc = msm_isp_proc_cmd_list(vfe_dev, arg);
1117 mutex_unlock(&vfe_dev->realtime_mutex);
1121 return msm_isp_ioctl_unlocked(sd, cmd, arg);
1127 long msm_isp_ioctl(struct v4l2_subdev *sd,
1128 unsigned int cmd, void *arg)
1130 return msm_isp_ioctl_compat(sd, cmd, arg);
1132 #else /* CONFIG_COMPAT */
1133 long msm_isp_ioctl(struct v4l2_subdev *sd,
1134 unsigned int cmd, void *arg)
1136 return msm_isp_ioctl_unlocked(sd, cmd, arg);
1138 #endif /* CONFIG_COMPAT */
/*
 * msm_isp_send_hw_cmd - validate and execute one userspace-supplied
 * register configuration command against the VFE hardware.
 *
 * @vfe_dev:     target VFE device (register base in vfe_dev->vfe_base)
 * @reg_cfg_cmd: command descriptor (type + offsets/lengths union)
 * @cfg_data:    kernel copy of the userspace payload buffer
 * @cmd_len:     byte length of @cfg_data
 *
 * Two phases: the first switch range-checks every offset/length in the
 * command against vfe_base_size and cmd_len (including unsigned-overflow
 * checks of the form "off > UINT_MAX - len"); the second switch performs
 * the actual register/DMI access.
 * NOTE(review): many lines (braces, break/return statements, some case
 * labels) are elided from this excerpt; do not infer control flow gaps
 * are intentional.
 */
1140 static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
1141 struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
1142 uint32_t *cfg_data, uint32_t cmd_len)
/* Basic pointer sanity; cfg_data/cmd_len are only optional for VFE_CFG_MASK. */
1144 if (!vfe_dev || !reg_cfg_cmd) {
1145 pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
1146 __LINE__, vfe_dev, reg_cfg_cmd);
1149 if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
1150 (!cfg_data || !cmd_len)) {
1151 pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
1152 __func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
1157 /* Validate input parameters */
1158 switch (reg_cfg_cmd->cmd_type) {
1161 case VFE_WRITE_MB: {
/* reg_offset+len must fit in the mapped register window and be word aligned. */
1162 if ((reg_cfg_cmd->u.rw_info.reg_offset >
1163 (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1164 ((reg_cfg_cmd->u.rw_info.reg_offset +
1165 reg_cfg_cmd->u.rw_info.len) >
1166 vfe_dev->vfe_base_size) ||
1167 (reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
1168 pr_err_ratelimited("%s:%d regoffset %d len %d res %d\n",
1170 reg_cfg_cmd->u.rw_info.reg_offset,
1171 reg_cfg_cmd->u.rw_info.len,
1172 (uint32_t)vfe_dev->vfe_base_size);
/* cmd_data_offset+len must stay inside the userspace payload. */
1176 if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
1177 (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1178 ((reg_cfg_cmd->u.rw_info.cmd_data_offset +
1179 reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
1180 pr_err_ratelimited("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
1182 reg_cfg_cmd->u.rw_info.cmd_data_offset,
1183 reg_cfg_cmd->u.rw_info.len, cmd_len);
1189 case VFE_WRITE_DMI_16BIT:
1190 case VFE_WRITE_DMI_32BIT:
1191 case VFE_WRITE_DMI_64BIT:
1192 case VFE_READ_DMI_16BIT:
1193 case VFE_READ_DMI_32BIT:
1194 case VFE_READ_DMI_64BIT: {
/* For 64-bit DMI the hi table must sit exactly one word above the lo table. */
1195 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
1196 reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1197 if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
1198 reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
1199 (reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
1200 reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
1201 (sizeof(uint32_t)))) {
1202 pr_err("%s:%d hi %d lo %d\n",
/* NOTE(review): hi_tbl_offset is printed twice below; the second was
 * presumably meant to be lo_tbl_offset — confirm against upstream. */
1204 reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1205 reg_cfg_cmd->u.dmi_info.hi_tbl_offset);
1208 if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
1209 pr_err("%s:%d len %d\n",
1211 reg_cfg_cmd->u.dmi_info.len);
/* hi table: overflow check plus bounds check against cmd_len. */
1215 reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
1216 (reg_cfg_cmd->u.dmi_info.len -
1217 sizeof(uint32_t))) ||
1218 ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
1219 reg_cfg_cmd->u.dmi_info.len -
1220 sizeof(uint32_t)) > cmd_len)) {
1221 pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
1223 reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1224 reg_cfg_cmd->u.dmi_info.len, cmd_len);
/* lo table: same overflow + bounds check. */
1228 if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
1229 (UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
1230 ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
1231 reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
1232 pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
1234 reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
1235 reg_cfg_cmd->u.dmi_info.len, cmd_len);
/* Phase 2: all offsets validated above — perform the hardware access. */
1245 switch (reg_cfg_cmd->cmd_type) {
1247 msm_camera_io_memcpy(vfe_dev->vfe_base +
1248 reg_cfg_cmd->u.rw_info.reg_offset,
1250 (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
1251 reg_cfg_cmd->u.rw_info.len);
1254 case VFE_WRITE_MB: {
/* _mb variant issues a memory barrier around the register copy. */
1255 msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
1256 reg_cfg_cmd->u.rw_info.reg_offset,
1258 (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
1259 reg_cfg_cmd->u.rw_info.len);
1262 case VFE_CFG_MASK: {
1265 unsigned long flags;
1267 if ((UINT_MAX - sizeof(temp) <
1268 reg_cfg_cmd->u.mask_info.reg_offset) ||
1269 (vfe_dev->vfe_base_size <
1270 reg_cfg_cmd->u.mask_info.reg_offset +
1272 (reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
1273 pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
1276 grab_lock = vfe_dev->hw_info->vfe_ops.core_ops.
1277 is_module_cfg_lock_needed(reg_cfg_cmd->
1278 u.mask_info.reg_offset);
/* Read-modify-write of one register under shared_data_lock. */
1280 spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
1281 temp = msm_camera_io_r(vfe_dev->vfe_base +
1282 reg_cfg_cmd->u.mask_info.reg_offset);
1284 temp &= ~reg_cfg_cmd->u.mask_info.mask;
1285 temp |= reg_cfg_cmd->u.mask_info.val;
1286 msm_camera_io_w(temp, vfe_dev->vfe_base +
1287 reg_cfg_cmd->u.mask_info.reg_offset);
1289 spin_unlock_irqrestore(&vfe_dev->shared_data_lock,
1293 case VFE_WRITE_DMI_16BIT:
1294 case VFE_WRITE_DMI_32BIT:
1295 case VFE_WRITE_DMI_64BIT: {
1297 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1298 uint32_t hi_val, lo_val, lo_val1;
1300 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
1301 hi_tbl_ptr = cfg_data +
1302 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1304 lo_tbl_ptr = cfg_data +
1305 reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
/* 64-bit entries interleave hi/lo words, so only half the bytes are "lo". */
1306 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
1307 reg_cfg_cmd->u.dmi_info.len =
1308 reg_cfg_cmd->u.dmi_info.len / 2;
1309 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1310 lo_val = *lo_tbl_ptr++;
1311 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
/* 16-bit mode packs two entries per 32-bit word; write low half first. */
1312 lo_val1 = lo_val & 0x0000FFFF;
1313 lo_val = (lo_val & 0xFFFF0000)>>16;
1314 msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
1315 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1316 } else if (reg_cfg_cmd->cmd_type ==
1317 VFE_WRITE_DMI_64BIT) {
1319 hi_val = *hi_tbl_ptr;
1320 hi_tbl_ptr = hi_tbl_ptr + 2;
1321 msm_camera_io_w(hi_val, vfe_dev->vfe_base +
1322 vfe_dev->hw_info->dmi_reg_offset);
1324 msm_camera_io_w(lo_val, vfe_dev->vfe_base +
1325 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1329 case VFE_READ_DMI_16BIT:
1330 case VFE_READ_DMI_32BIT:
1331 case VFE_READ_DMI_64BIT: {
1333 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1334 uint32_t hi_val, lo_val, lo_val1;
1336 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1337 hi_tbl_ptr = cfg_data +
1338 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1341 lo_tbl_ptr = cfg_data +
1342 reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
1344 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
1345 reg_cfg_cmd->u.dmi_info.len =
1346 reg_cfg_cmd->u.dmi_info.len / 2;
/* Mirror of the write path: read DMI data back into cfg_data. */
1348 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1349 lo_val = msm_camera_io_r(vfe_dev->vfe_base +
1350 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1352 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
1353 lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
1354 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1355 lo_val |= lo_val1 << 16;
1357 *lo_tbl_ptr++ = lo_val;
1358 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1359 hi_val = msm_camera_io_r(vfe_dev->vfe_base +
1360 vfe_dev->hw_info->dmi_reg_offset);
1361 *hi_tbl_ptr = hi_val;
1368 case VFE_HW_UPDATE_LOCK: {
/* Lock succeeds only when *cfg_data matches the current PIX frame id and
 * differs from the last frame id that already took the update lock. */
1369 uint32_t update_id =
1370 vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
1371 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
1372 || update_id == *cfg_data) {
1373 pr_err("%s hw update lock failed acq %d, cur id %u, last id %u\n",
1376 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
1382 case VFE_HW_UPDATE_UNLOCK: {
/* Detect register updates that straddled a frame boundary. */
1383 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
1385 pr_err("hw update across frame boundary,begin id %u, end id %d\n",
1387 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
1389 vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
1390 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
/* Register read loop: copy a register range into cfg_data with per-step
 * pointer bounds checks. */
1395 uint32_t *data_ptr = cfg_data +
1396 reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
1397 for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
1398 if ((data_ptr < cfg_data) ||
1399 (UINT_MAX / sizeof(*data_ptr) <
1400 (data_ptr - cfg_data)) ||
1401 (sizeof(*data_ptr) * (data_ptr - cfg_data) >=
1404 *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
1405 reg_cfg_cmd->u.rw_info.reg_offset);
1406 reg_cfg_cmd->u.rw_info.reg_offset += 4;
1410 case GET_MAX_CLK_RATE: {
1414 if (cmd_len != sizeof(__u32)) {
1415 pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1416 __func__, __LINE__, cmd_len,
1420 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_max_clk_rate(
1423 pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
1427 *(__u32 *)cfg_data = (__u32)rate;
1431 case GET_CLK_RATES: {
1433 struct msm_isp_clk_rates rates;
1434 struct msm_isp_clk_rates *user_data =
1435 (struct msm_isp_clk_rates *)cfg_data;
1436 if (cmd_len != sizeof(struct msm_isp_clk_rates)) {
1437 pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1438 __func__, __LINE__, cmd_len,
1439 sizeof(struct msm_isp_clk_rates));
1442 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(
1445 pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
/* Copy the three clock tiers back to the caller's buffer. */
1448 user_data->svs_rate = rates.svs_rate;
1449 user_data->nominal_rate = rates.nominal_rate;
1450 user_data->high_rate = rates.high_rate;
1454 uint32_t *isp_id = NULL;
1456 if (cmd_len < sizeof(uint32_t)) {
1457 pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1458 __func__, __LINE__, cmd_len,
1463 isp_id = (uint32_t *)cfg_data;
1464 *isp_id = vfe_dev->pdev->id;
1467 case SET_WM_UB_SIZE:
1469 case SET_UB_POLICY: {
1471 if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
1472 pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1473 __func__, __LINE__, cmd_len,
1474 sizeof(vfe_dev->vfe_ub_policy));
1477 vfe_dev->vfe_ub_policy = *cfg_data;
/*
 * msm_isp_proc_cmd - copy a batch of register config commands from
 * userspace, run each through msm_isp_send_hw_cmd(), and copy the
 * (possibly updated) payload back.
 *
 * Bounds: num_cfg is limited to MAX_ISP_CMD_NUM and cmd_len to
 * MAX_ISP_CMD_LEN before any allocation, preventing oversized
 * kzalloc/copy_from_user requests driven by userspace.
 * NOTE(review): error-label bodies and some braces are elided from
 * this excerpt.
 */
1484 int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
1487 struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
1488 struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
1489 uint32_t *cfg_data = NULL;
1491 if (!proc_cmd->num_cfg
1492 || proc_cmd->num_cfg > MAX_ISP_CMD_NUM) {
1493 pr_err("%s: num_cfg outside allowed range\n",
/* Allocate the kernel-side command array sized by the validated count. */
1498 reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
1499 proc_cmd->num_cfg, GFP_KERNEL);
1502 goto reg_cfg_failed;
1505 if (copy_from_user(reg_cfg_cmd,
1506 (void __user *)(proc_cmd->cfg_cmd),
1507 sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
1509 goto copy_cmd_failed;
1512 if (proc_cmd->cmd_len > 0) {
1513 if (proc_cmd->cmd_len > MAX_ISP_CMD_LEN) {
1514 pr_err("%s: cmd_len exceed max allowed\n", __func__);
1516 goto cfg_data_failed;
1519 cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
1522 goto cfg_data_failed;
1525 if (copy_from_user(cfg_data,
1526 (void __user *)(proc_cmd->cfg_data),
1527 proc_cmd->cmd_len)) {
1529 goto copy_cmd_failed;
/* Execute each command against the shared payload buffer.
 * NOTE(review): "®_cfg_cmd" below is mojibake for "&reg_cfg_cmd" —
 * confirm against the upstream source before building. */
1533 for (i = 0; i < proc_cmd->num_cfg; i++)
1534 rc = msm_isp_send_hw_cmd(vfe_dev, ®_cfg_cmd[i],
1535 cfg_data, proc_cmd->cmd_len);
/* Read-type commands wrote results into cfg_data; return them to user. */
1537 if (copy_to_user(proc_cmd->cfg_data,
1538 cfg_data, proc_cmd->cmd_len)) {
1540 goto copy_cmd_failed;
/*
 * msm_isp_send_event - queue a v4l2 event of @event_type carrying
 * @event_data (copied into the event payload) to the subdev's device
 * node so userspace pollers receive it.
 */
1551 int msm_isp_send_event(struct vfe_device *vfe_dev,
1552 uint32_t event_type,
1553 struct msm_isp_event_data *event_data)
1555 struct v4l2_event isp_event;
1557 memset(&isp_event, 0, sizeof(struct v4l2_event));
1559 isp_event.type = event_type;
/* msm_isp_event_data must fit in v4l2_event.u.data (64 bytes). */
1561 memcpy(&isp_event.u.data[0], event_data,
1562 sizeof(struct msm_isp_event_data));
1563 v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
/* CAL_WORD: ceil(width * M / N) — words needed for `width` pixels when
 * M/N encodes bits-per-pixel over word size. */
1567 #define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
/*
 * msm_isp_cal_word_per_line - number of AXI words needed per line for
 * the given fourcc @output_format and @pixel_per_line.  Groups formats
 * by packing density (e.g. 8bpp -> 1/8, packed 10-bit -> 5/32).
 * Unknown formats are reported via msm_isp_print_fourcc_error.
 */
1569 int msm_isp_cal_word_per_line(uint32_t output_format,
1570 uint32_t pixel_per_line)
1574 switch (output_format) {
1575 case V4L2_PIX_FMT_SBGGR8:
1576 case V4L2_PIX_FMT_SGBRG8:
1577 case V4L2_PIX_FMT_SGRBG8:
1578 case V4L2_PIX_FMT_SRGGB8:
1579 case V4L2_PIX_FMT_QBGGR8:
1580 case V4L2_PIX_FMT_QGBRG8:
1581 case V4L2_PIX_FMT_QGRBG8:
1582 case V4L2_PIX_FMT_QRGGB8:
1583 case V4L2_PIX_FMT_JPEG:
1584 case V4L2_PIX_FMT_META:
1585 val = CAL_WORD(pixel_per_line, 1, 8);
1587 case V4L2_PIX_FMT_SBGGR10:
1588 case V4L2_PIX_FMT_SGBRG10:
1589 case V4L2_PIX_FMT_SGRBG10:
1590 case V4L2_PIX_FMT_SRGGB10:
1591 case V4L2_PIX_FMT_Y10:
1592 case V4L2_PIX_FMT_SBGGR10DPCM6:
1593 case V4L2_PIX_FMT_SGBRG10DPCM6:
1594 case V4L2_PIX_FMT_SGRBG10DPCM6:
1595 case V4L2_PIX_FMT_SRGGB10DPCM6:
1596 case V4L2_PIX_FMT_SBGGR10DPCM8:
1597 case V4L2_PIX_FMT_SGBRG10DPCM8:
1598 case V4L2_PIX_FMT_SGRBG10DPCM8:
1599 case V4L2_PIX_FMT_SRGGB10DPCM8:
1600 case V4L2_PIX_FMT_META10:
1601 val = CAL_WORD(pixel_per_line, 5, 32);
1603 case V4L2_PIX_FMT_SBGGR12:
1604 case V4L2_PIX_FMT_SGBRG12:
1605 case V4L2_PIX_FMT_SGRBG12:
1606 case V4L2_PIX_FMT_SRGGB12:
1607 case V4L2_PIX_FMT_Y12:
1608 val = CAL_WORD(pixel_per_line, 3, 16);
1610 case V4L2_PIX_FMT_SBGGR14:
1611 case V4L2_PIX_FMT_SGBRG14:
1612 case V4L2_PIX_FMT_SGRBG14:
1613 case V4L2_PIX_FMT_SRGGB14:
1614 val = CAL_WORD(pixel_per_line, 7, 32);
1616 case V4L2_PIX_FMT_QBGGR10:
1617 case V4L2_PIX_FMT_QGBRG10:
1618 case V4L2_PIX_FMT_QGRBG10:
1619 case V4L2_PIX_FMT_QRGGB10:
1620 val = CAL_WORD(pixel_per_line, 1, 6);
1622 case V4L2_PIX_FMT_QBGGR12:
1623 case V4L2_PIX_FMT_QGBRG12:
1624 case V4L2_PIX_FMT_QGRBG12:
1625 case V4L2_PIX_FMT_QRGGB12:
1626 val = CAL_WORD(pixel_per_line, 1, 5);
1628 case V4L2_PIX_FMT_QBGGR14:
1629 case V4L2_PIX_FMT_QGBRG14:
1630 case V4L2_PIX_FMT_QGRBG14:
1631 case V4L2_PIX_FMT_QRGGB14:
1632 val = CAL_WORD(pixel_per_line, 1, 4);
1634 case V4L2_PIX_FMT_NV12:
1635 case V4L2_PIX_FMT_NV21:
1636 case V4L2_PIX_FMT_NV14:
1637 case V4L2_PIX_FMT_NV41:
1638 case V4L2_PIX_FMT_NV16:
1639 case V4L2_PIX_FMT_NV61:
1640 case V4L2_PIX_FMT_GREY:
1641 val = CAL_WORD(pixel_per_line, 1, 8);
1643 case V4L2_PIX_FMT_YUYV:
1644 case V4L2_PIX_FMT_YVYU:
1645 case V4L2_PIX_FMT_UYVY:
1646 case V4L2_PIX_FMT_VYUY:
1647 val = CAL_WORD(pixel_per_line, 2, 8);
1649 case V4L2_PIX_FMT_P16BGGR10:
1650 case V4L2_PIX_FMT_P16GBRG10:
1651 case V4L2_PIX_FMT_P16GRBG10:
1652 case V4L2_PIX_FMT_P16RGGB10:
1653 val = CAL_WORD(pixel_per_line, 1, 4);
1655 case V4L2_PIX_FMT_NV24:
1656 case V4L2_PIX_FMT_NV42:
1657 val = CAL_WORD(pixel_per_line, 1, 8);
1659 /* TD: Add more image format */
1661 msm_isp_print_fourcc_error(__func__, output_format);
/*
 * msm_isp_get_pack_format - map a fourcc @output_format to its
 * msm_isp_pack_fmt class.  The three case groups correspond to the
 * MIPI-packed Bayer/grey formats, the QCOM (Q*) padded formats, and
 * the plane16 (P16*) formats; the return statements for each group
 * fall on lines elided from this excerpt.  Unknown formats are
 * reported via msm_isp_print_fourcc_error.
 */
1667 enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
1669 switch (output_format) {
1670 case V4L2_PIX_FMT_SBGGR8:
1671 case V4L2_PIX_FMT_SGBRG8:
1672 case V4L2_PIX_FMT_SGRBG8:
1673 case V4L2_PIX_FMT_SRGGB8:
1674 case V4L2_PIX_FMT_SBGGR10:
1675 case V4L2_PIX_FMT_SGBRG10:
1676 case V4L2_PIX_FMT_SGRBG10:
1677 case V4L2_PIX_FMT_SRGGB10:
1678 case V4L2_PIX_FMT_SBGGR10DPCM6:
1679 case V4L2_PIX_FMT_SGBRG10DPCM6:
1680 case V4L2_PIX_FMT_SGRBG10DPCM6:
1681 case V4L2_PIX_FMT_SRGGB10DPCM6:
1682 case V4L2_PIX_FMT_SBGGR10DPCM8:
1683 case V4L2_PIX_FMT_SGBRG10DPCM8:
1684 case V4L2_PIX_FMT_SGRBG10DPCM8:
1685 case V4L2_PIX_FMT_SRGGB10DPCM8:
1686 case V4L2_PIX_FMT_SBGGR12:
1687 case V4L2_PIX_FMT_SGBRG12:
1688 case V4L2_PIX_FMT_SGRBG12:
1689 case V4L2_PIX_FMT_SRGGB12:
1690 case V4L2_PIX_FMT_SBGGR14:
1691 case V4L2_PIX_FMT_SGBRG14:
1692 case V4L2_PIX_FMT_SGRBG14:
1693 case V4L2_PIX_FMT_SRGGB14:
1694 case V4L2_PIX_FMT_GREY:
1695 case V4L2_PIX_FMT_Y10:
1696 case V4L2_PIX_FMT_Y12:
1698 case V4L2_PIX_FMT_QBGGR8:
1699 case V4L2_PIX_FMT_QGBRG8:
1700 case V4L2_PIX_FMT_QGRBG8:
1701 case V4L2_PIX_FMT_QRGGB8:
1702 case V4L2_PIX_FMT_QBGGR10:
1703 case V4L2_PIX_FMT_QGBRG10:
1704 case V4L2_PIX_FMT_QGRBG10:
1705 case V4L2_PIX_FMT_QRGGB10:
1706 case V4L2_PIX_FMT_QBGGR12:
1707 case V4L2_PIX_FMT_QGBRG12:
1708 case V4L2_PIX_FMT_QGRBG12:
1709 case V4L2_PIX_FMT_QRGGB12:
1710 case V4L2_PIX_FMT_QBGGR14:
1711 case V4L2_PIX_FMT_QGBRG14:
1712 case V4L2_PIX_FMT_QGRBG14:
1713 case V4L2_PIX_FMT_QRGGB14:
1715 case V4L2_PIX_FMT_P16BGGR10:
1716 case V4L2_PIX_FMT_P16GBRG10:
1717 case V4L2_PIX_FMT_P16GRBG10:
1718 case V4L2_PIX_FMT_P16RGGB10:
1721 msm_isp_print_fourcc_error(__func__, output_format);
/*
 * msm_isp_get_bit_per_pixel - bits-per-pixel for a fourcc
 * @output_format.  Case groups are ordered by depth (4, 6, 8, 10,
 * 12, 14, 16 bits); the return statement for each group falls on
 * lines elided from this excerpt.  Unknown formats are reported
 * twice (via msm_isp_print_fourcc_error and a direct pr_err).
 */
1727 int msm_isp_get_bit_per_pixel(uint32_t output_format)
1729 switch (output_format) {
1730 case V4L2_PIX_FMT_Y4:
1732 case V4L2_PIX_FMT_Y6:
1734 case V4L2_PIX_FMT_SBGGR8:
1735 case V4L2_PIX_FMT_SGBRG8:
1736 case V4L2_PIX_FMT_SGRBG8:
1737 case V4L2_PIX_FMT_SRGGB8:
1738 case V4L2_PIX_FMT_QBGGR8:
1739 case V4L2_PIX_FMT_QGBRG8:
1740 case V4L2_PIX_FMT_QGRBG8:
1741 case V4L2_PIX_FMT_QRGGB8:
1742 case V4L2_PIX_FMT_JPEG:
1743 case V4L2_PIX_FMT_META:
1744 case V4L2_PIX_FMT_NV12:
1745 case V4L2_PIX_FMT_NV21:
1746 case V4L2_PIX_FMT_NV14:
1747 case V4L2_PIX_FMT_NV41:
1748 case V4L2_PIX_FMT_YVU410:
1749 case V4L2_PIX_FMT_YVU420:
1750 case V4L2_PIX_FMT_YUYV:
1751 case V4L2_PIX_FMT_YYUV:
1752 case V4L2_PIX_FMT_YVYU:
1753 case V4L2_PIX_FMT_UYVY:
1754 case V4L2_PIX_FMT_VYUY:
1755 case V4L2_PIX_FMT_YUV422P:
1756 case V4L2_PIX_FMT_YUV411P:
1757 case V4L2_PIX_FMT_Y41P:
1758 case V4L2_PIX_FMT_YUV444:
1759 case V4L2_PIX_FMT_YUV555:
1760 case V4L2_PIX_FMT_YUV565:
1761 case V4L2_PIX_FMT_YUV32:
1762 case V4L2_PIX_FMT_YUV410:
1763 case V4L2_PIX_FMT_YUV420:
1764 case V4L2_PIX_FMT_GREY:
1765 case V4L2_PIX_FMT_PAL8:
1766 case V4L2_PIX_FMT_UV8:
1767 case MSM_V4L2_PIX_FMT_META:
1769 case V4L2_PIX_FMT_SBGGR10:
1770 case V4L2_PIX_FMT_SGBRG10:
1771 case V4L2_PIX_FMT_SGRBG10:
1772 case V4L2_PIX_FMT_SRGGB10:
1773 case V4L2_PIX_FMT_SBGGR10DPCM6:
1774 case V4L2_PIX_FMT_SGBRG10DPCM6:
1775 case V4L2_PIX_FMT_SGRBG10DPCM6:
1776 case V4L2_PIX_FMT_SRGGB10DPCM6:
1777 case V4L2_PIX_FMT_SBGGR10DPCM8:
1778 case V4L2_PIX_FMT_SGBRG10DPCM8:
1779 case V4L2_PIX_FMT_SGRBG10DPCM8:
1780 case V4L2_PIX_FMT_SRGGB10DPCM8:
1781 case V4L2_PIX_FMT_QBGGR10:
1782 case V4L2_PIX_FMT_QGBRG10:
1783 case V4L2_PIX_FMT_QGRBG10:
1784 case V4L2_PIX_FMT_QRGGB10:
1785 case V4L2_PIX_FMT_Y10:
1786 case V4L2_PIX_FMT_Y10BPACK:
1787 case V4L2_PIX_FMT_P16BGGR10:
1788 case V4L2_PIX_FMT_P16GBRG10:
1789 case V4L2_PIX_FMT_P16GRBG10:
1790 case V4L2_PIX_FMT_P16RGGB10:
1791 case V4L2_PIX_FMT_META10:
1792 case MSM_V4L2_PIX_FMT_META10:
1794 case V4L2_PIX_FMT_SBGGR12:
1795 case V4L2_PIX_FMT_SGBRG12:
1796 case V4L2_PIX_FMT_SGRBG12:
1797 case V4L2_PIX_FMT_SRGGB12:
1798 case V4L2_PIX_FMT_QBGGR12:
1799 case V4L2_PIX_FMT_QGBRG12:
1800 case V4L2_PIX_FMT_QGRBG12:
1801 case V4L2_PIX_FMT_QRGGB12:
1802 case V4L2_PIX_FMT_Y12:
1804 case V4L2_PIX_FMT_SBGGR14:
1805 case V4L2_PIX_FMT_SGBRG14:
1806 case V4L2_PIX_FMT_SGRBG14:
1807 case V4L2_PIX_FMT_SRGGB14:
1808 case V4L2_PIX_FMT_QBGGR14:
1809 case V4L2_PIX_FMT_QGBRG14:
1810 case V4L2_PIX_FMT_QGRBG14:
1811 case V4L2_PIX_FMT_QRGGB14:
1813 case V4L2_PIX_FMT_NV16:
1814 case V4L2_PIX_FMT_NV61:
1815 case V4L2_PIX_FMT_Y16:
1817 case V4L2_PIX_FMT_NV24:
1818 case V4L2_PIX_FMT_NV42:
1820 /* TD: Add more image format */
1822 msm_isp_print_fourcc_error(__func__, output_format);
1823 pr_err("%s: Invalid output format %x\n",
1824 __func__, output_format);
/*
 * msm_isp_update_error_frame_count - bump the per-device frame counter
 * used by msm_isp_process_error_info() to throttle error dumps.
 */
1829 void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
1831 struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1833 error_info->info_dump_frame_count++;
/*
 * msm_isp_process_iommu_page_fault - handle an SMMU page fault on this
 * VFE: raise ISP_EVENT_IOMMU_P_FAULT to userspace, then (only on the
 * first fault — gated by pagefault_debug_disable) dump buffer-manager
 * state and the ping/pong write-master addresses for debugging.
 * Returns the pre-fault value of pagefault_debug_disable.
 */
1837 static int msm_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
1839 int rc = vfe_dev->buf_mgr->pagefault_debug_disable;
1841 pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %pK\n", __func__,
1842 __LINE__, vfe_dev->pdev->id, vfe_dev);
1844 msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);
/* One-shot debug dump: suppress repeated dumps on fault storms. */
1846 if (vfe_dev->buf_mgr->pagefault_debug_disable == 0) {
1847 vfe_dev->buf_mgr->pagefault_debug_disable = 1;
1848 vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
1849 vfe_dev->page_fault_addr);
1850 msm_isp_print_ping_pong_address(vfe_dev,
1851 vfe_dev->page_fault_addr);
1852 vfe_dev->hw_info->vfe_ops.axi_ops.
1853 read_wm_ping_pong_addr(vfe_dev);
/*
 * msm_isp_process_error_info - if this is the first accumulated error,
 * or every 100th frame thereafter, let the hardware layer report the
 * latched error status, then clear the accumulated masks so the next
 * report starts fresh.
 */
1858 void msm_isp_process_error_info(struct vfe_device *vfe_dev)
1860 struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1862 if (error_info->error_count == 1 ||
1863 !(error_info->info_dump_frame_count % 100)) {
1864 vfe_dev->hw_info->vfe_ops.core_ops.
1865 process_error_status(vfe_dev);
1866 error_info->error_mask0 = 0;
1867 error_info->error_mask1 = 0;
1868 error_info->camif_status = 0;
1869 error_info->violation_status = 0;
/*
 * msm_isp_update_error_info - accumulate new error IRQ bits into the
 * per-device masks and count one more error occurrence (consumed later
 * by msm_isp_process_error_info).
 */
1873 static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
1874 uint32_t error_mask0, uint32_t error_mask1)
1876 vfe_dev->error_info.error_mask0 |= error_mask0;
1877 vfe_dev->error_info.error_mask1 |= error_mask1;
1878 vfe_dev->error_info.error_count++;
/*
 * msm_isp_process_overflow_irq - detect an AXI bus overflow from the
 * IRQ status (or force one via @force_overflow) and start recovery:
 * transition overflow_state to OVERFLOW_DETECTED, save the current IRQ
 * masks for later restore, mask IRQs down to halt/restart bits, mirror
 * all of that onto the other VFE in dual-VFE (split) mode, and emit an
 * ISP_EVENT_ERROR / ISP_ERROR_BUS_OVERFLOW event.
 * @irq_status0/@irq_status1 are modified in place to mask out IRQs
 * that must not be processed during recovery.
 * NOTE(review): several braces/returns are elided from this excerpt.
 */
1881 void msm_isp_process_overflow_irq(
1882 struct vfe_device *vfe_dev,
1883 uint32_t *irq_status0, uint32_t *irq_status1,
1884 uint32_t force_overflow)
1886 uint32_t overflow_mask;
1888 /* if there are no active streams - do not start recovery */
1889 if (!vfe_dev->axi_data.num_active_stream)
1892 /* Mask out all other irqs if recovery is started */
1893 if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
1894 uint32_t halt_restart_mask0, halt_restart_mask1;
1896 vfe_dev->hw_info->vfe_ops.core_ops.
1897 get_halt_restart_mask(&halt_restart_mask0,
1898 &halt_restart_mask1);
1899 *irq_status0 &= halt_restart_mask0;
1900 *irq_status1 &= halt_restart_mask1;
1905 /* Check if any overflow bit is set */
1906 vfe_dev->hw_info->vfe_ops.core_ops.
1907 get_overflow_mask(&overflow_mask);
1908 if (!force_overflow)
1909 overflow_mask &= *irq_status1;
1911 if (overflow_mask) {
1912 struct msm_isp_event_data error_event;
/* Overflow during a pending reset: just drop the overflow bits. */
1914 if (vfe_dev->reset_pending == 1) {
1915 pr_err("%s:%d failed: overflow %x during reset\n",
1916 __func__, __LINE__, overflow_mask);
1917 /* Clear overflow bits since reset is pending */
1918 *irq_status1 &= ~overflow_mask;
1922 ISP_DBG("%s: VFE%d Bus overflow detected: start recovery!\n",
1923 __func__, vfe_dev->pdev->id);
1926 /* maks off irq for current vfe */
1927 atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
1928 NO_OVERFLOW, OVERFLOW_DETECTED);
/* Remember the pre-recovery IRQ masks so they can be restored later. */
1929 vfe_dev->recovery_irq0_mask = vfe_dev->irq0_mask;
1930 vfe_dev->recovery_irq1_mask = vfe_dev->irq1_mask;
1932 vfe_dev->hw_info->vfe_ops.core_ops.
1933 set_halt_restart_mask(vfe_dev);
1935 /* mask off other vfe if dual vfe is used */
1936 if (vfe_dev->is_split) {
1937 uint32_t other_vfe_id;
1938 struct vfe_device *other_vfe_dev;
1940 other_vfe_id = (vfe_dev->pdev->id == ISP_VFE0) ?
1941 ISP_VFE1 : ISP_VFE0;
1942 other_vfe_dev = vfe_dev->common_data->
1943 dual_vfe_res->vfe_dev[other_vfe_id];
1944 if (other_vfe_dev) {
1945 other_vfe_dev->recovery_irq0_mask =
1946 other_vfe_dev->irq0_mask;
1947 other_vfe_dev->recovery_irq1_mask =
1948 other_vfe_dev->irq1_mask;
1951 atomic_cmpxchg(&(vfe_dev->common_data->dual_vfe_res->
1952 vfe_dev[other_vfe_id]->
1953 error_info.overflow_state),
1954 NO_OVERFLOW, OVERFLOW_DETECTED);
1956 vfe_dev->hw_info->vfe_ops.core_ops.
1957 set_halt_restart_mask(vfe_dev->common_data->
1958 dual_vfe_res->vfe_dev[other_vfe_id]);
1961 /* reset irq status so skip further process */
1965 /* send overflow event as needed */
1966 if (atomic_read(&vfe_dev->error_info.overflow_state)
1968 memset(&error_event, 0, sizeof(error_event));
1969 error_event.frame_id =
1970 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
1971 error_event.u.error_info.err_type =
1972 ISP_ERROR_BUS_OVERFLOW;
1973 msm_isp_send_event(vfe_dev,
1974 ISP_EVENT_ERROR, &error_event);
/*
 * msm_isp_reset_burst_count_and_frame_drop - re-arm the frame-drop
 * pattern for an ACTIVE burst stream that still has captures pending;
 * all other streams are ignored.
 */
1979 void msm_isp_reset_burst_count_and_frame_drop(
1980 struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
1982 if ((stream_info->state != ACTIVE) ||
1983 (stream_info->stream_type != BURST_STREAM)) {
1986 if (stream_info->num_burst_capture != 0)
1987 msm_isp_reset_framedrop(vfe_dev, stream_info);
/*
 * msm_isp_enqueue_tasklet_cmd - called from hard-IRQ context: snapshot
 * the IRQ/ping-pong status plus a timestamp into the next slot of the
 * fixed-size tasklet ring (MSM_VFE_TASKLETQ_SIZE), append it to
 * tasklet_q, and schedule the VFE tasklet for deferred processing.
 * On ring overflow the oldest in-flight entry is overwritten
 * (list_del + reuse) rather than dropped silently.
 */
1990 static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
1991 uint32_t irq_status0, uint32_t irq_status1,
1992 uint32_t ping_pong_status)
1994 unsigned long flags;
1995 struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;
1997 spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
1998 queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
1999 if (queue_cmd->cmd_used) {
2000 ISP_DBG("%s: Tasklet queue overflow: %d\n",
2001 __func__, vfe_dev->pdev->id);
2002 list_del(&queue_cmd->list);
2004 atomic_add(1, &vfe_dev->irq_cnt);
2006 queue_cmd->vfeInterruptStatus0 = irq_status0;
2007 queue_cmd->vfeInterruptStatus1 = irq_status1;
2008 queue_cmd->vfePingPongStatus = ping_pong_status;
2009 msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
2010 queue_cmd->cmd_used = 1;
2011 vfe_dev->taskletq_idx = (vfe_dev->taskletq_idx + 1) %
2012 MSM_VFE_TASKLETQ_SIZE;
2013 list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
2014 spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
2015 tasklet_hi_schedule(&vfe_dev->vfe_tasklet);
/*
 * msm_isp_process_irq - VFE hard-IRQ handler.  Reads and clears the
 * IRQ status registers, runs overflow detection, splits the status
 * into error bits (accumulated via msm_isp_update_error_info) and
 * normal bits, records a debug snapshot of both VFEs' state when in
 * dual-VFE mode (dump_data ring under dump_irq_lock), and finally
 * defers the real processing to the tasklet via
 * msm_isp_enqueue_tasklet_cmd.
 * NOTE(review): early-return lines for the spurious-IRQ and
 * nothing-left-to-do cases are elided from this excerpt.
 */
2018 irqreturn_t msm_isp_process_irq(int irq_num, void *data)
2020 struct vfe_device *vfe_dev = (struct vfe_device *) data;
2021 uint32_t irq_status0, irq_status1, ping_pong_status;
2022 uint32_t error_mask0, error_mask1;
2024 vfe_dev->hw_info->vfe_ops.irq_ops.
2025 read_irq_status_and_clear(vfe_dev, &irq_status0, &irq_status1);
/* Both words zero => spurious interrupt. */
2027 if ((irq_status0 == 0) && (irq_status1 == 0)) {
2028 pr_err("%s:VFE%d irq_status0 & 1 are both 0\n",
2029 __func__, vfe_dev->pdev->id);
2033 ping_pong_status = vfe_dev->hw_info->vfe_ops.axi_ops.
2034 get_pingpong_status(vfe_dev);
2035 if (vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq) {
2036 vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq(vfe_dev,
2039 msm_isp_process_overflow_irq(vfe_dev,
2040 &irq_status0, &irq_status1, 0);
/* Separate error bits from the normal status words. */
2042 vfe_dev->hw_info->vfe_ops.core_ops.
2043 get_error_mask(&error_mask0, &error_mask1);
2044 error_mask0 &= irq_status0;
2045 error_mask1 &= irq_status1;
2046 irq_status0 &= ~error_mask0;
2047 irq_status1 &= ~error_mask1;
2048 if ((error_mask0 != 0) || (error_mask1 != 0))
2049 msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);
2051 if ((irq_status0 == 0) && (irq_status1 == 0) &&
2052 (!(((error_mask0 != 0) || (error_mask1 != 0)) &&
2053 vfe_dev->error_info.error_count == 1))) {
2054 ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
/* Dual-VFE debug trace: capture this VFE's status plus a live read of
 * the peer VFE's IRQ/ping-pong state into the dump ring. */
2057 dump_data.vfe_dev = (struct vfe_device *) data;
2058 if (vfe_dev->is_split &&
2059 (vfe_dev->common_data->dual_vfe_res->vfe_dev[
2060 !vfe_dev->pdev->id]) &&
2061 (vfe_dev->common_data->dual_vfe_res->vfe_dev[
2062 !vfe_dev->pdev->id]->vfe_open_cnt)) {
2063 spin_lock(&dump_irq_lock);
2064 dump_data.arr[dump_data.first].current_vfe_irq.
2065 vfe_id = vfe_dev->pdev->id;
2066 dump_data.arr[dump_data.first].current_vfe_irq.
2067 irq_status0 = irq_status0;
2068 dump_data.arr[dump_data.first].current_vfe_irq.
2069 irq_status1 = irq_status1;
2070 dump_data.arr[dump_data.first].current_vfe_irq.
2071 ping_pong_status = ping_pong_status;
2073 dump_data.arr[dump_data.first].other_vfe.
2074 vfe_id = (!vfe_dev->pdev->id);
2075 vfe_dev->hw_info->vfe_ops.irq_ops.
2077 vfe_dev->common_data->dual_vfe_res->vfe_dev[
2078 !vfe_dev->pdev->id],
2079 &dump_data.arr[dump_data.first].other_vfe.irq_status0,
2080 &dump_data.arr[dump_data.first].other_vfe.irq_status1);
2081 dump_data.arr[dump_data.first].other_vfe.
2083 vfe_dev->hw_info->vfe_ops.axi_ops.
2084 get_pingpong_status(
2085 vfe_dev->common_data->dual_vfe_res->vfe_dev[
2086 !vfe_dev->pdev->id]);
2087 msm_isp_get_timestamp(&dump_data.arr[dump_data.first].
2088 other_vfe.ts, vfe_dev);
2090 (dump_data.first + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
2091 dump_data.fill_count++;
2092 spin_unlock(&dump_irq_lock);
2094 msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1,
/*
 * msm_isp_do_tasklet - bottom half: drain the tasklet queue filled by
 * msm_isp_process_irq and dispatch each snapshot to the per-subsystem
 * IRQ handlers (halt, reset, stats, AXI, CAMIF, reg-update, SOF,
 * epoch).  Bails out early if the device was closed or its clocks are
 * off.  In dual-VFE mode each processed entry is also traced into the
 * tasklet_data debug ring under dump_tasklet_lock.
 * NOTE(review): some braces/continue statements are elided from this
 * excerpt; `ts` is copied into the trace ring but the line assigning
 * it from queue_cmd->ts is not visible here — confirm upstream.
 */
2101 void msm_isp_do_tasklet(unsigned long data)
2103 unsigned long flags;
2104 struct vfe_device *vfe_dev = (struct vfe_device *) data;
2105 struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
2106 struct msm_vfe_tasklet_queue_cmd *queue_cmd;
2107 struct msm_isp_timestamp ts;
2108 uint32_t irq_status0, irq_status1, pingpong_status;
2110 if (vfe_dev->vfe_base == NULL || vfe_dev->vfe_open_cnt == 0) {
2111 ISP_DBG("%s: VFE%d open cnt = %d, device closed(base = %pK)\n",
2112 __func__, vfe_dev->pdev->id, vfe_dev->vfe_open_cnt,
2117 while (atomic_read(&vfe_dev->irq_cnt)) {
2118 spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
2119 queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
2120 struct msm_vfe_tasklet_queue_cmd, list);
/* Queue empty despite non-zero count: resynchronize and stop. */
2123 atomic_set(&vfe_dev->irq_cnt, 0);
2124 spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
2127 atomic_sub(1, &vfe_dev->irq_cnt);
2128 list_del(&queue_cmd->list);
2130 if (!vfe_dev->clk_enabled) {
2131 /* client closed, delayed task should exit directly */
2132 spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
2136 queue_cmd->cmd_used = 0;
2137 irq_status0 = queue_cmd->vfeInterruptStatus0;
2138 irq_status1 = queue_cmd->vfeInterruptStatus1;
2139 pingpong_status = queue_cmd->vfePingPongStatus;
2141 /* related to rw reg, need to be protected */
2142 irq_ops->process_halt_irq(vfe_dev,
2143 irq_status0, irq_status1);
2144 spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
2145 ISP_DBG("%s: vfe_id %d status0: 0x%x status1: 0x%x\n",
2146 __func__, vfe_dev->pdev->id, irq_status0, irq_status1);
2147 if (vfe_dev->is_split) {
2148 spin_lock(&dump_tasklet_lock);
2149 tasklet_data.arr[tasklet_data.first].
2150 current_vfe_irq.vfe_id = vfe_dev->pdev->id;
2151 tasklet_data.arr[tasklet_data.first].
2152 current_vfe_irq.core = smp_processor_id();
2153 tasklet_data.arr[tasklet_data.first].
2154 current_vfe_irq.irq_status0 = irq_status0;
2155 tasklet_data.arr[tasklet_data.first].
2156 current_vfe_irq.irq_status1 = irq_status1;
2157 tasklet_data.arr[tasklet_data.first].
2158 current_vfe_irq.ping_pong_status = pingpong_status;
2159 tasklet_data.arr[tasklet_data.first].
2160 current_vfe_irq.ts = ts;
2161 tasklet_data.first =
2162 (tasklet_data.first + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
2163 spin_unlock(&dump_tasklet_lock);
2165 irq_ops->process_reset_irq(vfe_dev,
2166 irq_status0, irq_status1);
/* During overflow recovery only halt/reset IRQs are serviced. */
2167 if (atomic_read(&vfe_dev->error_info.overflow_state)
2169 ISP_DBG("%s: Recovery in processing, Ignore IRQs!!!\n",
2173 msm_isp_process_error_info(vfe_dev);
2174 irq_ops->process_stats_irq(vfe_dev,
2175 irq_status0, irq_status1,
2176 pingpong_status, &ts);
2177 irq_ops->process_axi_irq(vfe_dev,
2178 irq_status0, irq_status1,
2179 pingpong_status, &ts);
2180 irq_ops->process_camif_irq(vfe_dev,
2181 irq_status0, irq_status1, &ts);
2182 irq_ops->process_reg_update(vfe_dev,
2183 irq_status0, irq_status1, &ts);
2184 irq_ops->process_sof_irq(vfe_dev,
2185 irq_status0, irq_status1, &ts);
2186 irq_ops->process_epoch_irq(vfe_dev,
2187 irq_status0, irq_status1, &ts);
/*
 * msm_isp_set_src_state - ioctl backend: set the active flag and
 * frame id of one input source from the userspace-supplied
 * msm_vfe_axi_src_state after bounds-checking input_src.
 */
2191 int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
2193 struct msm_vfe_axi_src_state *src_state = arg;
2195 if (src_state->input_src >= VFE_SRC_MAX)
2197 vfe_dev->axi_data.src_info[src_state->input_src].active =
2198 src_state->src_active;
2199 vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
2200 src_state->src_frame_id;
/*
 * msm_vfe_iommu_fault_handler - SMMU fault callback registered via
 * cam_smmu_reg_client_page_fault_handler (@token is the vfe_device).
 * Records the faulting @iova, sanity-checks that the buffer manager
 * exists and streams are active, then — under core_mutex and only if
 * the device is still open — marks HALT_ENFORCED and delegates to
 * msm_isp_process_iommu_page_fault.
 */
2204 static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
2205 struct device *dev, unsigned long iova, int flags, void *token)
2207 struct vfe_device *vfe_dev = NULL;
2210 vfe_dev = (struct vfe_device *)token;
2211 vfe_dev->page_fault_addr = iova;
2212 if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops ||
2213 !vfe_dev->axi_data.num_active_stream) {
2214 pr_err("%s:%d buf_mgr %pK active strms %d\n", __func__,
2215 __LINE__, vfe_dev->buf_mgr,
2216 vfe_dev->axi_data.num_active_stream);
2220 mutex_lock(&vfe_dev->core_mutex);
2221 if (vfe_dev->vfe_open_cnt > 0) {
2222 atomic_set(&vfe_dev->error_info.overflow_state,
2224 msm_isp_process_iommu_page_fault(vfe_dev);
2226 pr_err("%s: no handling, vfe open cnt = %d\n",
2227 __func__, vfe_dev->vfe_open_cnt);
2229 mutex_unlock(&vfe_dev->core_mutex);
2231 ISP_DBG("%s:%d] no token received: %pK\n",
2232 __func__, __LINE__, token);
/*
 * msm_isp_open_node - subdev open handler.  First opener initializes
 * hardware: init_hw, status-register clear, hardware reset, register
 * defaults, buffer-manager init, zeroing of all per-session state,
 * SMMU page-fault handler registration, and finally drops clocks,
 * regulators and the AHB vote so the device can enter XO shutdown
 * until streaming starts.  Subsequent opens only bump vfe_open_cnt.
 * Errors on init/reset unwind the open count under both mutexes.
 * NOTE(review): some braces/return lines are elided from this excerpt.
 */
2239 int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
2241 struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
2243 enum cam_ahb_clk_client id;
2245 ISP_DBG("%s open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
2247 if (vfe_dev->common_data == NULL ||
2248 vfe_dev->common_data->dual_vfe_res == NULL) {
2249 pr_err("%s: Error in probe. No common_data or dual vfe res\n",
/* Dual-VFE epoch sync mask is owned by VFE0; reset it on its open. */
2254 if (vfe_dev->pdev->id == ISP_VFE0)
2255 vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;
2257 mutex_lock(&vfe_dev->realtime_mutex);
2258 mutex_lock(&vfe_dev->core_mutex);
/* Already open: just count the extra opener. */
2260 if (vfe_dev->vfe_open_cnt++) {
2261 mutex_unlock(&vfe_dev->core_mutex);
2262 mutex_unlock(&vfe_dev->realtime_mutex);
2266 vfe_dev->reset_pending = 0;
2267 vfe_dev->isp_sof_debug = 0;
2268 vfe_dev->isp_raw0_debug = 0;
2269 vfe_dev->isp_raw1_debug = 0;
2270 vfe_dev->isp_raw2_debug = 0;
2272 if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
2273 pr_err("%s: init hardware failed\n", __func__);
2274 vfe_dev->vfe_open_cnt--;
2275 mutex_unlock(&vfe_dev->core_mutex);
2276 mutex_unlock(&vfe_dev->realtime_mutex);
2280 memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
2281 atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
2283 vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);
/* Hardware version register sits at offset 0 of the mapped base. */
2285 vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
2286 ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
2287 rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
2289 pr_err("%s: reset timeout\n", __func__);
2290 vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
2291 vfe_dev->vfe_open_cnt--;
2292 mutex_unlock(&vfe_dev->core_mutex);
2293 mutex_unlock(&vfe_dev->realtime_mutex);
2297 vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
2299 vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr,
/* Fresh session: wipe all shared AXI/stats/error/fetch state. */
2302 memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
2303 memset(&vfe_dev->stats_data, 0,
2304 sizeof(struct msm_vfe_stats_shared_data));
2305 memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
2306 memset(&vfe_dev->fetch_engine_info, 0,
2307 sizeof(vfe_dev->fetch_engine_info));
2308 vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
2309 vfe_dev->axi_data.enable_frameid_recovery = 0;
2310 vfe_dev->taskletq_idx = 0;
2311 vfe_dev->vt_enable = 0;
2312 vfe_dev->reg_update_requested = 0;
2313 /* Register page fault handler */
2314 vfe_dev->buf_mgr->pagefault_debug_disable = 0;
2315 cam_smmu_reg_client_page_fault_handler(
2316 vfe_dev->buf_mgr->iommu_hdl,
2317 msm_vfe_iommu_fault_handler, vfe_dev);
2319 /* Disable vfe clks and allow device to go XO shutdown mode */
2320 if (vfe_dev->pdev->id == 0)
2321 id = CAM_AHB_CLIENT_VFE0;
2323 id = CAM_AHB_CLIENT_VFE1;
2324 if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
2325 pr_err("%s: failed to remove vote for AHB\n", __func__);
2326 vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
2327 vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
2329 mutex_unlock(&vfe_dev->core_mutex);
2330 mutex_unlock(&vfe_dev->realtime_mutex);
#ifdef CONFIG_MSM_AVTIMER
/*
 * msm_isp_end_avtimer - release the AV timer vote taken while video
 * timestamping (vt_enable) was active.
 *
 * NOTE(review): avcs_core_disable_power_collapse(0) presumably drops the
 * power-collapse-disable vote taken at stream start — confirm against the
 * avtimer enable path.
 */
static void msm_isp_end_avtimer(void)
{
	avcs_core_disable_power_collapse(0);
}
#else
/* Stub used when AV timer support is not compiled in. */
static void msm_isp_end_avtimer(void)
{
	pr_err("AV Timer is not supported\n");
}
#endif
/*
 * msm_isp_close_node - v4l2 subdev close: tear down one VFE instance.
 *
 * Takes realtime_mutex then core_mutex (same order as the open path, so the
 * last unlock is core_mutex first). Only the final close (open_cnt reaching
 * 0) performs the hardware teardown; earlier closes just decrement the count.
 *
 * Returns 0 on success, -EINVAL if called with open_cnt already 0.
 */
int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	long rc = 0;
	int wm;
	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
	enum cam_ahb_clk_client id;

	ISP_DBG("%s E open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
	mutex_lock(&vfe_dev->realtime_mutex);
	mutex_lock(&vfe_dev->core_mutex);

	/* Enable vfe clks to wake up from XO shutdown mode */
	if (vfe_dev->pdev->id == 0)
		id = CAM_AHB_CLIENT_VFE0;
	else
		id = CAM_AHB_CLIENT_VFE1;
	if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE) < 0)
		pr_err("%s: failed to vote for AHB\n", __func__);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 1);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 1);

	/* close without a matching open: reject */
	if (!vfe_dev->vfe_open_cnt) {
		pr_err("%s invalid state open cnt %d\n", __func__,
			vfe_dev->vfe_open_cnt);
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EINVAL;
	}

	/* other users remain: just drop our reference, no hw teardown */
	if (vfe_dev->vfe_open_cnt > 1) {
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return 0;
	}
	/* Unregister page fault handler */
	cam_smmu_reg_client_page_fault_handler(
		vfe_dev->buf_mgr->iommu_hdl,
		NULL, NULL);

	/* blocking halt of the AXI bridge before resetting the core */
	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
	if (rc <= 0)
		pr_err("%s: halt timeout rc=%ld\n", __func__, rc);

	vfe_dev->hw_info->vfe_ops.core_ops.
		update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
	vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 0);

	/* after regular hw stop, reduce open cnt */
	vfe_dev->vfe_open_cnt--;

	/* put scratch buf in all the wm */
	for (wm = 0; wm < vfe_dev->axi_data.hw_info->num_wm; wm++) {
		msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PING_FLAG);
		msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PONG_FLAG);
	}
	vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
	vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
	if (vfe_dev->vt_enable) {
		msm_isp_end_avtimer();
		vfe_dev->vt_enable = 0;
	}
	vfe_dev->is_split = 0;

	mutex_unlock(&vfe_dev->core_mutex);
	mutex_unlock(&vfe_dev->realtime_mutex);
	return 0;
}
/*
 * msm_isp_flush_tasklet - drain all pending IRQ work queued for the tasklet.
 *
 * Called with interrupts enabled; takes tasklet_lock with irqsave so the IRQ
 * handler cannot enqueue concurrently while we empty the queue. Each drained
 * entry is removed from the list and its cmd_used slot freed; irq_cnt is the
 * producer/consumer counter shared with the IRQ path.
 */
void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
{
	unsigned long flags;
	struct msm_vfe_tasklet_queue_cmd *queue_cmd;

	spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
	while (atomic_read(&vfe_dev->irq_cnt)) {
		queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
			struct msm_vfe_tasklet_queue_cmd, list);
		/* NOTE(review): list_first_entry() never returns NULL; this
		 * guard actually protects against irq_cnt disagreeing with an
		 * empty list — confirm intent before removing.
		 */
		if (!queue_cmd) {
			atomic_set(&vfe_dev->irq_cnt, 0);
			break;
		}
		atomic_sub(1, &vfe_dev->irq_cnt);
		list_del(&queue_cmd->list);
		queue_cmd->cmd_used = 0;
	}
	spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
}
2436 void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
2437 enum msm_vfe_input_src frame_src)
2439 struct msm_vfe_axi_stream *stream_info = NULL;
2441 unsigned long flags;
2443 for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
2444 stream_info = &vfe_dev->axi_data.stream_info[j];
2445 if (stream_info->state != ACTIVE)
2447 if (frame_src != SRC_TO_INTF(stream_info->stream_src))
2451 &vfe_dev->axi_data.stream_info[j];
2452 spin_lock_irqsave(&stream_info->lock, flags);
2453 stream_info->activated_framedrop_period =
2454 stream_info->requested_framedrop_period;
2455 spin_unlock_irqrestore(&stream_info->lock, flags);
2459 static void msm_isp_dump_irq_debug(void)
2461 uint32_t index, count, i;
2463 if (dump_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
2464 index = dump_data.first;
2465 count = MAX_ISP_PING_PONG_DUMP_SIZE;
2468 count = dump_data.first;
2470 for (i = 0; i < count; i++) {
2471 pr_err("%s: trace_msm_cam_ping_pong_debug_dump need re-impl\n",
2473 index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
2477 void msm_isp_dump_taskelet_debug(void)
2479 uint32_t index, count, i;
2481 if (tasklet_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
2482 index = tasklet_data.first;
2483 count = MAX_ISP_PING_PONG_DUMP_SIZE;
2486 count = tasklet_data.first;
2488 for (i = 0; i < count; i++) {
2489 pr_err("%s: trace_msm_cam_tasklet_debug_dump need implement\n",
2491 index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
/*
 * msm_isp_dump_ping_pong_mismatch - on a ping/pong buffer mismatch, quiesce
 * both VFEs and dump the captured IRQ and tasklet debug rings via tracing.
 *
 * Uses dump_data.vfe_dev (recorded by the IRQ path) to reach the dual-VFE
 * resources. Runs under dump_tasklet_lock so the rings are stable while
 * being replayed.
 */
void msm_isp_dump_ping_pong_mismatch(void)
{
	int i;

	spin_lock(&dump_tasklet_lock);
	for (i = 0; i < MAX_VFE; i++) {
		/* NOTE(review): the axi_ops callback name on the next line was
		 * lost in extraction; upstream quiesces IRQs per VFE here
		 * (clear_irq_mask) — confirm against the original source.
		 */
		dump_data.vfe_dev->hw_info->vfe_ops.axi_ops.
			clear_irq_mask(
			dump_data.vfe_dev->common_data->dual_vfe_res->vfe_dev[i]);
		/* wait for any in-flight handler on this VFE's IRQ line */
		synchronize_irq(
		(uint32_t)dump_data.vfe_dev->common_data->dual_vfe_res->vfe_dev[
		i]->vfe_irq->start);
	}
	trace_msm_cam_string(" ***** msm_isp_dump_irq_debug ****");
	msm_isp_dump_irq_debug();
	trace_msm_cam_string(" ***** msm_isp_dump_taskelet_debug ****");
	msm_isp_dump_taskelet_debug();
	spin_unlock(&dump_tasklet_lock);
}