1 /* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 #include <linux/mutex.h>
14 #include <media/v4l2-subdev.h>
15 #include <linux/ratelimit.h>
18 #include "msm_isp_util.h"
19 #include "msm_isp_axi_util.h"
20 #include "msm_isp_stats_util.h"
21 #include "msm_camera_io_util.h"
22 #include "cam_smmu_api.h"
23 #include "msm_isp48.h"
24 #define CREATE_TRACE_POINTS
25 #include "trace/events/msm_cam.h"
/* Depth of the per-fh V4L2 event queue used for all ISP event subscriptions. */
28 #define MAX_ISP_V4l2_EVENTS 100
/* Upper bound on chained register-config commands walked by proc_cmd_list. */
29 #define MAX_ISP_REG_LIST 100
/* Serializes all access to the shared isp_bandwidth_mgr singleton below. */
30 static DEFINE_MUTEX(bandwidth_mgr_mutex);
/* Single bandwidth manager shared by all ISP HW clients (VFE0/VFE1/CPP). */
31 static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
/*
 * MSM_ISP_DUAL_VFE_MUTEX_LOCK() - in dual-VFE (split) mode, when called on
 * VFE0, additionally take VFE1's core_mutex so dual-VFE operations are
 * serialized across both devices.  Pair with MSM_ISP_DUAL_VFE_MUTEX_UNLOCK.
 * NOTE(review): the closing braces of this macro are elided in this listing
 * (original-line gap 37->41) - verify against the full file.
 */
33 #define MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev) { \
34 if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) { \
35 struct vfe_device *vfe1_dev = vfe_dev->common_data-> \
36 dual_vfe_res->vfe_dev[ISP_VFE1]; \
37 mutex_lock(&vfe1_dev->core_mutex); \
/*
 * MSM_ISP_DUAL_VFE_MUTEX_UNLOCK() - release VFE1's core_mutex taken by
 * MSM_ISP_DUAL_VFE_MUTEX_LOCK (only acts in split mode on VFE0).
 * NOTE(review): closing braces elided in this listing (gap 45->49).
 */
41 #define MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev) { \
42 if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) { \
43 struct vfe_device *vfe1_dev = vfe_dev->common_data-> \
44 dual_vfe_res->vfe_dev[ISP_VFE1]; \
45 mutex_unlock(&vfe1_dev->core_mutex); \
/* Last CPP clock rate reported via msm_isp_util_update_clk_rate(). */
49 static uint64_t msm_isp_cpp_clk_rate;
/* HW version id for VFE40 on 8974 v2 silicon. */
51 #define VFE40_8974V2_VERSION 0x1001001A
/*
 * msm_isp_print_fourcc_error() - log an unsupported fourcc pixel format.
 * Unpacks the four ASCII bytes of @fourcc_format (LSB first); if any byte
 * falls outside the printable '0'..'z' window the raw integer is logged,
 * otherwise the decoded string is logged.
 * NOTE(review): local declarations (loop index, text buffer) and the
 * closing braces are elided in this listing (gaps 53->58, 62->66).
 */
53 void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
58 for (i = 0; i < 4; i++) {
59 text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
60 if ((text[i] < '0') || (text[i] > 'z')) {
61 pr_err("%s: Invalid output format %d (unprintable)\n",
62 origin, fourcc_format);
/* All four bytes printable: log the decoded fourcc string instead. */
66 pr_err("%s: Invalid output format %s\n",
/*
 * msm_isp_init_bandwidth_mgr() - register @client with the shared bandwidth
 * manager.  Marks the client active and bumps use_count under
 * bandwidth_mgr_mutex; the first caller that supplies a @vfe_dev (while no
 * bus_client exists yet) installs the platform bus-client ops
 * (init_bw_mgr/update_bw/deinit_bw_mgr).  On init_bw_mgr failure the
 * active flag and use_count are rolled back.
 * NOTE(review): error-path branches/returns are elided in this listing
 * (gaps 78->81, 84->87, 90->94, 95->98) - verify against the full file.
 */
71 int msm_isp_init_bandwidth_mgr(struct vfe_device *vfe_dev,
72 enum msm_isp_hw_client client)
76 mutex_lock(&bandwidth_mgr_mutex);
/* Already active: nothing to do for this client. */
77 if (isp_bandwidth_mgr.client_info[client].active) {
78 mutex_unlock(&bandwidth_mgr_mutex);
81 isp_bandwidth_mgr.client_info[client].active = 1;
82 isp_bandwidth_mgr.use_count++;
/* First registration with a device: create the platform bus client. */
83 if (vfe_dev && !isp_bandwidth_mgr.bus_client) {
84 rc = vfe_dev->hw_info->vfe_ops.platform_ops.init_bw_mgr(vfe_dev,
87 isp_bandwidth_mgr.update_bw =
88 vfe_dev->hw_info->vfe_ops.platform_ops.update_bw;
89 isp_bandwidth_mgr.deinit_bw_mgr =
90 vfe_dev->hw_info->vfe_ops.platform_ops.deinit_bw_mgr;
/* Roll back the registration on init failure. */
94 isp_bandwidth_mgr.use_count--;
95 isp_bandwidth_mgr.client_info[client].active = 0;
98 mutex_unlock(&bandwidth_mgr_mutex);
/*
 * msm_isp_update_bandwidth() - record new ab/ib bus-bandwidth votes for
 * @client and push the aggregate to the bus via the installed update_bw op.
 * Fails (with an error log) when the manager has no users or no bus client.
 * NOTE(review): return statements are elided in this listing
 * (gaps 113->117, 120->124).
 */
102 int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
103 uint64_t ab, uint64_t ib)
107 mutex_lock(&bandwidth_mgr_mutex);
108 if (!isp_bandwidth_mgr.use_count ||
109 !isp_bandwidth_mgr.bus_client) {
110 pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
111 __func__, isp_bandwidth_mgr.use_count,
112 isp_bandwidth_mgr.bus_client);
113 mutex_unlock(&bandwidth_mgr_mutex);
117 isp_bandwidth_mgr.client_info[client].ab = ab;
118 isp_bandwidth_mgr.client_info[client].ib = ib;
119 rc = isp_bandwidth_mgr.update_bw(&isp_bandwidth_mgr);
120 mutex_unlock(&bandwidth_mgr_mutex);
/*
 * msm_isp_deinit_bandwidth_mgr() - unregister @client from the shared
 * bandwidth manager.  Clears the client's vote record, drops use_count,
 * and when the last user goes away tears down the platform bus client
 * via the stored deinit_bw_mgr op.
 * NOTE(review): early returns and the tail of the function are elided in
 * this listing (gaps 127->130, 136->140, 147->150).
 */
124 void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
126 if (client >= MAX_ISP_CLIENT) {
127 pr_err("invalid Client id %d", client);
130 mutex_lock(&bandwidth_mgr_mutex);
131 memset(&isp_bandwidth_mgr.client_info[client], 0,
132 sizeof(struct msm_isp_bandwidth_info));
133 if (isp_bandwidth_mgr.use_count) {
134 isp_bandwidth_mgr.use_count--;
/* Other clients still registered: keep the bus client alive. */
135 if (isp_bandwidth_mgr.use_count) {
136 mutex_unlock(&bandwidth_mgr_mutex);
140 if (!isp_bandwidth_mgr.bus_client) {
141 pr_err("%s:%d error: bus client invalid\n",
143 mutex_unlock(&bandwidth_mgr_mutex);
/* Last user gone: release the platform bus client. */
147 isp_bandwidth_mgr.deinit_bw_mgr(
150 mutex_unlock(&bandwidth_mgr_mutex);
/*
 * msm_isp_util_get_bandwidth_stats() - snapshot bandwidth-manager state
 * (active flag and ab/ib votes for VFE0, VFE1 and CPP), the device's last
 * recorded overflow votes, the current VFE clock rate and the cached CPP
 * clock rate into @stats for debug/statistics reporting.
 * NOTE(review): reads isp_bandwidth_mgr without taking
 * bandwidth_mgr_mutex - values are a best-effort snapshot.
 */
153 void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
154 struct msm_isp_statistics *stats)
156 stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
157 stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
158 stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
160 stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
161 stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
162 stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
164 stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
165 stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
166 stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
167 stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
168 stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
169 stats->vfe_clk_rate = vfe_dev->vfe_clk_info[
170 vfe_dev->hw_info->vfe_clk_idx].clk_rate;
171 stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
/*
 * msm_isp_util_update_clk_rate() - cache the current CPP clock rate so
 * msm_isp_util_get_bandwidth_stats() can report it.
 */
174 void msm_isp_util_update_clk_rate(long clock_rate)
176 msm_isp_cpp_clk_rate = clock_rate;
/*
 * msm_isp_get_framedrop_period() - map a frame-skip pattern enum to its
 * drop period.  For the visible default-style path the period is the
 * pattern value plus one.
 * NOTE(review): the individual case labels of this switch are elided in
 * this listing (gap 182->191) - consult the full file for the complete
 * pattern->period mapping.
 */
179 uint32_t msm_isp_get_framedrop_period(
180 enum msm_vfe_frame_skip_pattern frame_skip_pattern)
182 switch (frame_skip_pattern) {
191 return frame_skip_pattern + 1;
/*
 * msm_isp_get_timestamp() - fill @time_stamp for an ISP event.
 * event_time always comes from wall-clock gettimeofday; buf_time comes
 * from the AV timer when vt_enable is set, otherwise from monotonic
 * boottime (ns converted to us).
 * NOTE(review): the else keyword between the two buf_time sources is
 * elided in this listing (gap 215->217).
 */
206 void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
207 struct vfe_device *vfe_dev)
211 do_gettimeofday(&(time_stamp->event_time));
212 if (vfe_dev->vt_enable) {
213 msm_isp_get_avtimer_ts(time_stamp);
214 time_stamp->buf_time.tv_sec = time_stamp->vt_time.tv_sec;
215 time_stamp->buf_time.tv_usec = time_stamp->vt_time.tv_usec;
217 get_monotonic_boottime(&ts);
218 time_stamp->buf_time.tv_sec = ts.tv_sec;
219 time_stamp->buf_time.tv_usec = ts.tv_nsec/1000;
/*
 * msm_isp_evt_mask_to_isp_event() - translate an ISP_EVENT_MASK_INDEX_*
 * value into the corresponding ISP_EVENT_* id; unknown indices map to
 * ISP_EVENT_SUBS_MASK_NONE.
 * NOTE(review): the switch keyword and per-case break statements are
 * elided in this listing (e.g. gaps 225->228, 229->231).
 */
223 static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
225 u32 evt_id = ISP_EVENT_SUBS_MASK_NONE;
228 case ISP_EVENT_MASK_INDEX_STATS_NOTIFY:
229 evt_id = ISP_EVENT_STATS_NOTIFY;
231 case ISP_EVENT_MASK_INDEX_ERROR:
232 evt_id = ISP_EVENT_ERROR;
234 case ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT:
235 evt_id = ISP_EVENT_IOMMU_P_FAULT;
237 case ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE:
238 evt_id = ISP_EVENT_STREAM_UPDATE_DONE;
240 case ISP_EVENT_MASK_INDEX_REG_UPDATE:
241 evt_id = ISP_EVENT_REG_UPDATE;
243 case ISP_EVENT_MASK_INDEX_SOF:
244 evt_id = ISP_EVENT_SOF;
246 case ISP_EVENT_MASK_INDEX_BUF_DIVERT:
247 evt_id = ISP_EVENT_BUF_DIVERT;
249 case ISP_EVENT_MASK_INDEX_BUF_DONE:
250 evt_id = ISP_EVENT_BUF_DONE;
252 case ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY:
253 evt_id = ISP_EVENT_COMP_STATS_NOTIFY;
255 case ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE:
256 evt_id = ISP_EVENT_FE_READ_DONE;
258 case ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH:
259 evt_id = ISP_EVENT_PING_PONG_MISMATCH;
261 case ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING:
262 evt_id = ISP_EVENT_REG_UPDATE_MISSING;
264 case ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR:
265 evt_id = ISP_EVENT_BUF_FATAL_ERROR;
/* default: unknown index maps to the "none" sentinel. */
268 evt_id = ISP_EVENT_SUBS_MASK_NONE;
/*
 * msm_isp_subscribe_event_mask() - (un)subscribe @fh to the V4L2 events
 * derived from one event-mask index.
 * STATS_NOTIFY fans out over every stats type (evt_id + i for each of
 * MSM_ISP_STATS_MAX); SOF/REG_UPDATE/STREAM_UPDATE_DONE fan out over every
 * input interface (evt_id | interface for each VFE_SRC); all other indices
 * subscribe the single evt_id.  @subscribe_flag selects subscribe vs
 * unsubscribe; failures are logged with the failing event type.
 * NOTE(review): the if/else between subscribe and unsubscribe calls and
 * the error-return lines are elided in this listing (e.g. gaps 286->288,
 * 291->295, 307->314).
 */
275 static inline int msm_isp_subscribe_event_mask(struct v4l2_fh *fh,
276 struct v4l2_event_subscription *sub, int evt_mask_index,
277 u32 evt_id, bool subscribe_flag)
279 int rc = 0, i, interface;
281 if (ISP_EVENT_MASK_INDEX_STATS_NOTIFY == evt_mask_index) {
282 for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
283 sub->type = evt_id + i;
285 rc = v4l2_event_subscribe(fh, sub,
286 MAX_ISP_V4l2_EVENTS, NULL);
288 rc = v4l2_event_unsubscribe(fh, sub);
290 pr_err("%s: Subs event_type =0x%x failed\n",
291 __func__, sub->type);
295 } else if (ISP_EVENT_MASK_INDEX_SOF == evt_mask_index ||
296 ISP_EVENT_MASK_INDEX_REG_UPDATE == evt_mask_index ||
297 ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE == evt_mask_index) {
298 for (interface = 0; interface < VFE_SRC_MAX; interface++) {
299 sub->type = evt_id | interface;
301 rc = v4l2_event_subscribe(fh, sub,
302 MAX_ISP_V4l2_EVENTS, NULL);
304 rc = v4l2_event_unsubscribe(fh, sub);
306 pr_err("%s: Subs event_type =0x%x failed\n",
307 __func__, sub->type);
/* Remaining indices: single event id, no fan-out. */
314 rc = v4l2_event_subscribe(fh, sub,
315 MAX_ISP_V4l2_EVENTS, NULL);
317 rc = v4l2_event_unsubscribe(fh, sub);
319 pr_err("%s: Subs event_type =0x%x failed\n",
320 __func__, sub->type);
/*
 * msm_isp_process_event_subscription() - walk every bit of the requested
 * event mask (sub->type) from STATS_NOTIFY through BUF_FATAL_ERROR, map
 * each set bit to its event id and delegate to
 * msm_isp_subscribe_event_mask().  An all-zero mask is rejected with an
 * error log.
 * NOTE(review): loop increment, error returns and closing braces are
 * elided in this listing (gaps 335->340, 341->343, 349->357).
 */
327 static inline int msm_isp_process_event_subscription(struct v4l2_fh *fh,
328 struct v4l2_event_subscription *sub, bool subscribe_flag)
330 int rc = 0, evt_mask_index = 0;
331 u32 evt_mask = sub->type;
334 if (ISP_EVENT_SUBS_MASK_NONE == evt_mask) {
335 pr_err("%s: Subs event_type is None=0x%x\n",
340 for (evt_mask_index = ISP_EVENT_MASK_INDEX_STATS_NOTIFY;
341 evt_mask_index <= ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR;
343 if (evt_mask & (1<<evt_mask_index)) {
344 evt_id = msm_isp_evt_mask_to_isp_event(evt_mask_index);
345 rc = msm_isp_subscribe_event_mask(fh, sub,
346 evt_mask_index, evt_id, subscribe_flag);
348 pr_err("%s: Subs event index:%d failed\n",
349 __func__, evt_mask_index);
/* v4l2_subdev_core_ops subscribe hook: subscribe_flag = true. */
357 int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
358 struct v4l2_event_subscription *sub)
360 return msm_isp_process_event_subscription(fh, sub, true);
/* v4l2_subdev_core_ops unsubscribe hook: subscribe_flag = false. */
363 int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
364 struct v4l2_event_subscription *sub)
366 return msm_isp_process_event_subscription(fh, sub, false);
/*
 * msm_isp_start_fetch_engine() - start offline (fetch-engine) processing.
 * Seeds the PIX0 frame id from the fetch-engine start config so HAL sees
 * the same frame id it requested in do_reprocess, then hands off to the
 * HW-specific start_fetch_eng op.
 * NOTE(review): the frame-id source expression is elided in this listing
 * (gap 377->379) - presumably fe_cfg->frame_id; verify in the full file.
 */
369 static int msm_isp_start_fetch_engine(struct vfe_device *vfe_dev,
372 struct msm_vfe_fetch_eng_start *fe_cfg = arg;
374 * For Offline VFE, HAL expects same frame id
375 * for offline output which it requested in do_reprocess.
377 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
379 return vfe_dev->hw_info->vfe_ops.core_ops.
380 start_fetch_eng(vfe_dev, arg);
/*
 * msm_isp_start_fetch_engine_multi_pass() - start a multi-pass offline
 * run.  Seeds the PIX0 frame id like the single-pass path; on the second
 * pass it additionally re-arms the output stream: looks up the stream,
 * resets its framedrop state, programs the PING buffer address for the
 * requested output buffer, then issues a reg-update and reloads the
 * stream's write-masters before calling the HW multi-pass start op.
 * NOTE(review): error returns and loop-local declarations are elided in
 * this listing (gaps 389->392, 401->404, 410->413).
 */
383 static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
386 struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
387 struct msm_vfe_axi_stream *stream_info = NULL;
389 uint32_t wm_reload_mask = 0;
392 * For Offline VFE, HAL expects same frame id
393 * for offline output which it requested in do_reprocess.
395 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
397 if (fe_cfg->offline_pass == OFFLINE_SECOND_PASS) {
398 stream_info = msm_isp_get_stream_common_data(vfe_dev,
399 HANDLE_TO_IDX(fe_cfg->output_stream_id));
400 if (stream_info == NULL) {
401 pr_err("%s: Error in Offline process\n", __func__);
404 vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
405 msm_isp_reset_framedrop(vfe_dev, stream_info);
407 rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
408 VFE_PING_FLAG, fe_cfg->output_buf_idx);
410 pr_err("%s: Fetch engine config failed\n", __func__);
/* Collect all write-masters of the stream for a WM reload. */
413 for (i = 0; i < stream_info->num_planes; i++)
414 wm_reload_mask |= (1 << stream_info->wm[vfe_idx][i]);
415 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
417 vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
418 vfe_dev->vfe_base, wm_reload_mask);
420 return vfe_dev->hw_info->vfe_ops.core_ops.
421 start_fetch_eng_multi_pass(vfe_dev, arg);
/*
 * msm_isp_fetch_engine_done_notify() - notify userspace that a fetch-engine
 * read completed.  Builds an ISP_EVENT_FE_READ_DONE event from the stored
 * fetch-engine bookkeeping (session/stream ids, bufq handle, buffer index,
 * fd, offline mode) and the current PIX0 frame id, clears the engine's
 * is_busy flag, and queues the event.
 */
424 void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
425 struct msm_vfe_fetch_engine_info *fetch_engine_info)
427 struct msm_isp_event_data fe_rd_done_event;
428 memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data));
429 fe_rd_done_event.frame_id =
430 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
431 fe_rd_done_event.u.fetch_done.session_id =
432 fetch_engine_info->session_id;
433 fe_rd_done_event.u.fetch_done.stream_id = fetch_engine_info->stream_id;
434 fe_rd_done_event.u.fetch_done.handle = fetch_engine_info->bufq_handle;
435 fe_rd_done_event.u.fetch_done.buf_idx = fetch_engine_info->buf_idx;
436 fe_rd_done_event.u.fetch_done.fd = fetch_engine_info->fd;
437 fe_rd_done_event.u.fetch_done.offline_mode =
438 fetch_engine_info->offline_mode;
440 ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
441 __func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
/* Engine free before notifying so a new fetch can be queued. */
442 fetch_engine_info->is_busy = 0;
443 msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
/*
 * msm_isp_cfg_pix() - configure the VFE pixel (PIX0) input path.
 * Rejects the request if PIX0 is already active; otherwise copies clock,
 * mux, format and hvx/split settings into src_info, derives the input
 * width (pixels_per_line for CAMIF/TESTGEN, buf_stride for fetch-engine
 * EXTERNAL_READ), applies an optional SOF counter step from the CAMIF
 * subsample config, programs the input mux through the HW op and issues
 * a reg-update on PIX0.
 * NOTE(review): the error return for the active path and intermediate
 * lines are elided in this listing (gaps 453->457, 467->470).
 */
446 static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
447 struct msm_vfe_input_cfg *input_cfg)
450 struct msm_vfe_pix_cfg *pix_cfg = NULL;
452 if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
453 pr_err("%s: pixel path is active\n", __func__);
457 pix_cfg = &input_cfg->d.pix_cfg;
458 vfe_dev->hvx_cmd = pix_cfg->hvx_cmd;
459 vfe_dev->is_split = input_cfg->d.pix_cfg.is_split;
461 vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
462 input_cfg->input_pix_clk;
463 vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
464 input_cfg->d.pix_cfg.input_mux;
465 vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
466 input_cfg->d.pix_cfg.input_format;
467 vfe_dev->axi_data.src_info[VFE_PIX_0].sof_counter_step = 1;
470 * Fill pixel_clock into input_pix_clk so that user space
471 * can use rounded clk rate
473 input_cfg->input_pix_clk =
474 vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock;
476 ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
477 input_cfg->d.pix_cfg.input_mux, CAMIF,
478 input_cfg->d.pix_cfg.input_format);
480 if (input_cfg->d.pix_cfg.input_mux == CAMIF ||
481 input_cfg->d.pix_cfg.input_mux == TESTGEN) {
482 vfe_dev->axi_data.src_info[VFE_PIX_0].width =
483 input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
/* Optional SOF subsampling: only override the default step of 1
 * when the config supplies a positive value. */
484 if (input_cfg->d.pix_cfg.camif_cfg.subsample_cfg.
485 sof_counter_step > 0) {
486 vfe_dev->axi_data.src_info[VFE_PIX_0].
487 sof_counter_step = input_cfg->d.pix_cfg.
488 camif_cfg.subsample_cfg.sof_counter_step;
490 } else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
491 vfe_dev->axi_data.src_info[VFE_PIX_0].width =
492 input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
494 vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
495 vfe_dev, &input_cfg->d.pix_cfg);
496 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_PIX_0);
/*
 * msm_isp_cfg_rdi() - configure a raw-dump (RDI) input path.
 * Rejects the request if the target RDI source is already active,
 * otherwise records its pixel clock and programs the RDI registers via
 * the HW-specific cfg_rdi_reg op.
 * NOTE(review): the error return and function tail are elided in this
 * listing (gaps 506->510, 513->517).
 */
500 static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
501 struct msm_vfe_input_cfg *input_cfg)
504 if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
505 pr_err("%s: RAW%d path is active\n", __func__,
506 input_cfg->input_src - VFE_RAW_0);
510 vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
511 input_cfg->input_pix_clk;
512 vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
513 vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
/*
 * msm_isp_cfg_input() - dispatch an input-config request to the PIX or
 * RDI configuration helper based on input_src, then raise the VFE clock
 * to the requested pixel clock - but only if it exceeds the current rate,
 * so one input never lowers a rate another input still needs.
 * NOTE(review): case labels, break statements and the set_clk_rate
 * argument list are elided in this listing (gaps 523->525, 525->530,
 * 544->548).
 */
517 int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
520 struct msm_vfe_input_cfg *input_cfg = arg;
521 long pixel_clock = 0;
523 switch (input_cfg->input_src) {
525 rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
530 rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
533 pr_err("%s: Invalid input source\n", __func__);
537 pixel_clock = input_cfg->input_pix_clk;
539 * Only set rate to higher, do not lower higher
540 * rate needed by another input
542 if (pixel_clock > vfe_dev->vfe_clk_info[
543 vfe_dev->hw_info->vfe_clk_idx].clk_rate) {
544 rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(
548 pr_err("%s: clock set rate failed\n", __func__);
/*
 * msm_isp_dual_hw_master_slave_sync() - update the dual-camera sync mode.
 * Stores the requested sync mode in the shared master/slave resource
 * under common_dev_data_lock; when switching to ASYNC it walks every
 * registered src_info, clears each already-async source from the
 * active-src and src-sof masks and marks its sync state ASYNC.
 * NOTE(review): the spin_lock_irqsave call spans an elided line
 * (gap 568->570) and loop continue/closing braces are elided
 * (gaps 573->575, 577->579).
 */
555 static int msm_isp_dual_hw_master_slave_sync(struct vfe_device *vfe_dev,
560 struct msm_isp_dual_hw_master_slave_sync *link = arg;
562 struct master_slave_resource_info *ms_res =
563 &vfe_dev->common_data->ms_resource;
565 struct msm_vfe_src_info *src_info = NULL;
568 &vfe_dev->common_data->common_dev_data_lock,
570 ms_res->dual_sync_mode = link->sync_mode;
571 if (ms_res->dual_sync_mode == MSM_ISP_DUAL_CAM_ASYNC) {
572 for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
573 if (ms_res->src_info[i] == NULL)
575 src_info = ms_res->src_info[i];
576 if (src_info->dual_hw_ms_info.sync_state ==
577 MSM_ISP_DUAL_CAM_ASYNC)
579 ms_res->active_src_mask &= ~(1 <<
580 src_info->dual_hw_ms_info.index);
581 ms_res->src_sof_mask &= ~(1 <<
582 src_info->dual_hw_ms_info.index);
583 src_info->dual_hw_ms_info.sync_state =
584 MSM_ISP_DUAL_CAM_ASYNC;
587 spin_unlock_irqrestore(
588 &vfe_dev->common_data->common_dev_data_lock,
/*
 * msm_isp_set_dual_HW_master_slave_mode() - register this VFE's input
 * sources with the shared dual-camera master/slave resource.
 * Under common_dev_data_lock: marks the device DUAL_HW_MASTER_SLAVE with
 * equal-slicing UB policy, registers the primary interface (recording
 * either master_index + sof_delta_threshold for a master, or
 * primary_slv_idx for a slave), validates num_src, then registers every
 * listed non-primary source so their frame ids stay in sync (no sof_info
 * is allocated for them).  Each source's global index is
 * intf + VFE_SRC_MAX * pdev->id.
 * NOTE(review): early returns after the validation error logs are elided
 * in this listing (gaps 605->609, 640->643, 651->654).
 */
593 static int msm_isp_set_dual_HW_master_slave_mode(
594 struct vfe_device *vfe_dev, void *arg)
597 struct msm_isp_set_dual_hw_ms_cmd *dual_hw_ms_cmd = NULL;
598 struct msm_vfe_src_info *src_info = NULL;
600 struct master_slave_resource_info *ms_res =
601 &vfe_dev->common_data->ms_resource;
603 if (!vfe_dev || !arg) {
604 pr_err("%s: Error! Invalid input vfe_dev %pK arg %pK\n",
605 __func__, vfe_dev, arg);
609 spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
610 dual_hw_ms_cmd = (struct msm_isp_set_dual_hw_ms_cmd *)arg;
611 vfe_dev->common_data->ms_resource.dual_hw_type = DUAL_HW_MASTER_SLAVE;
612 vfe_dev->vfe_ub_policy = MSM_WM_UB_EQUAL_SLICING;
613 if (dual_hw_ms_cmd->primary_intf < VFE_SRC_MAX) {
614 ISP_DBG("%s: vfe %d primary_intf %d\n", __func__,
615 vfe_dev->pdev->id, dual_hw_ms_cmd->primary_intf);
616 src_info = &vfe_dev->axi_data.
617 src_info[dual_hw_ms_cmd->primary_intf];
618 src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
619 src_info->dual_hw_ms_info.dual_hw_ms_type =
620 dual_hw_ms_cmd->dual_hw_ms_type;
/* Global source index: interface offset by VFE id. */
621 src_info->dual_hw_ms_info.index = dual_hw_ms_cmd->
622 primary_intf + VFE_SRC_MAX * vfe_dev->pdev->id;
623 ms_res->src_info[src_info->dual_hw_ms_info.index] = src_info;
625 if (dual_hw_ms_cmd->dual_hw_ms_type == MS_TYPE_MASTER) {
626 ms_res->master_index = src_info->dual_hw_ms_info.index;
627 ms_res->sof_delta_threshold =
628 dual_hw_ms_cmd->sof_delta_threshold;
/* Slave: remember the primary slave's index instead. */
630 ms_res->primary_slv_idx =
631 src_info->dual_hw_ms_info.index;
634 ISP_DBG("%s: vfe %d num_src %d\n", __func__, vfe_dev->pdev->id,
635 dual_hw_ms_cmd->num_src);
636 if (dual_hw_ms_cmd->num_src > VFE_SRC_MAX) {
637 pr_err("%s: Error! Invalid num_src %d\n", __func__,
638 dual_hw_ms_cmd->num_src);
639 spin_unlock_irqrestore(&vfe_dev->common_data->
640 common_dev_data_lock, flags);
643 /* This for loop is for non-primary intf to be marked with Master/Slave
644 * in order for frame id sync. But their timestamp is not saved.
645 * So no sof_info resource is allocated */
646 for (i = 0; i < dual_hw_ms_cmd->num_src; i++) {
647 if (dual_hw_ms_cmd->input_src[i] >= VFE_SRC_MAX) {
648 pr_err("%s: Error! Invalid SRC param %d\n", __func__,
649 dual_hw_ms_cmd->input_src[i]);
650 spin_unlock_irqrestore(&vfe_dev->common_data->
651 common_dev_data_lock, flags);
654 ISP_DBG("%s: vfe %d src %d type %d\n", __func__,
655 vfe_dev->pdev->id, dual_hw_ms_cmd->input_src[i],
656 dual_hw_ms_cmd->dual_hw_ms_type);
657 src_info = &vfe_dev->axi_data.
658 src_info[dual_hw_ms_cmd->input_src[i]];
659 src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
660 src_info->dual_hw_ms_info.dual_hw_ms_type =
661 dual_hw_ms_cmd->dual_hw_ms_type;
662 src_info->dual_hw_ms_info.index = dual_hw_ms_cmd->
663 input_src[i] + VFE_SRC_MAX * vfe_dev->pdev->id;
664 ms_res->src_info[src_info->dual_hw_ms_info.index] = src_info;
667 spin_unlock_irqrestore(&vfe_dev->common_data->common_dev_data_lock,
/*
 * msm_isp_proc_cmd_list_unlocked() - process a userspace-supplied linked
 * list of register-config commands (native/64-bit ABI).
 * Runs the head command, then walks cmd.next with copy_from_user,
 * validating each node's next_size against the expected struct size and
 * capping the walk at MAX_ISP_REG_LIST nodes to bound a malicious or
 * corrupt list.
 * NOTE(review): the loop header over cmd.next, the advance of cmd to
 * cmd_next, and the error returns are elided in this listing
 * (gaps 688->693, 696->699, 706->711, 713->721).
 */
672 static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
676 struct msm_vfe_cfg_cmd_list *proc_cmd =
677 (struct msm_vfe_cfg_cmd_list *)arg;
678 struct msm_vfe_cfg_cmd_list cmd, cmd_next;
680 if (!vfe_dev || !arg) {
681 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
686 rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
688 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
693 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
694 pr_err("%s:%d failed: next size %u != expected %zu\n",
695 __func__, __LINE__, cmd.next_size,
696 sizeof(struct msm_vfe_cfg_cmd_list));
/* Bound the walk so a cyclic/hostile list cannot spin forever. */
699 if (++count >= MAX_ISP_REG_LIST) {
700 pr_err("%s:%d Error exceeding the max register count:%u\n",
701 __func__, __LINE__, count);
705 if (copy_from_user(&cmd_next, (void __user *)cmd.next,
706 sizeof(struct msm_vfe_cfg_cmd_list))) {
711 rc = msm_isp_proc_cmd(vfe_dev, &cmd_next.cfg_cmd);
713 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
/*
 * 32-bit-userspace (compat) layouts of the register-config command
 * structures: pointers are carried as compat_caddr_t and converted with
 * compat_ptr() before use.
 * NOTE(review): some members (e.g. num_cfg/cmd_len, next/next_size) are
 * elided in this listing (gaps 721->724, 729->734).
 */
721 struct msm_vfe_cfg_cmd2_32 {
724 compat_caddr_t cfg_data;
725 compat_caddr_t cfg_cmd;
728 struct msm_vfe_cfg_cmd_list_32 {
729 struct msm_vfe_cfg_cmd2_32 cfg_cmd;
/* Compat ioctl numbers matching VIDIOC_MSM_VFE_REG_CFG / _LIST_CFG but
 * sized for the 32-bit structs above. */
734 #define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
735 _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
736 #define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
737 _IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
/*
 * msm_isp_compat_to_proc_cmd() - widen a 32-bit-userspace cfg command into
 * the native struct: scalar fields copied, compat pointers converted with
 * compat_ptr().
 */
739 static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
740 struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
742 proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
743 proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
744 proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
745 proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
/*
 * msm_isp_proc_cmd_list_compat() - 32-bit-userspace counterpart of
 * msm_isp_proc_cmd_list_unlocked(): each list node is the _32 layout,
 * converted via msm_isp_compat_to_proc_cmd() before processing, with the
 * same next_size validation, MAX_ISP_REG_LIST cap and copy_from_user walk
 * (pointers converted with compat_ptr()).
 * NOTE(review): the advance of cmd to cmd_next and the error returns are
 * elided in this listing (gaps 765->769, 773->776, 783->788, 791->798).
 */
748 static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
752 struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
753 (struct msm_vfe_cfg_cmd_list_32 *)arg;
754 struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
755 struct msm_vfe_cfg_cmd2 current_cmd;
757 if (!vfe_dev || !arg) {
758 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
762 msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
763 rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
765 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
769 while (NULL != compat_ptr(cmd.next)) {
/* NOTE(review): error text prints sizeof the native struct while the
 * comparison uses the _32 struct - presumably only cosmetic since both
 * sides of the check use the _32 size; confirm against the full file. */
770 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
771 pr_err("%s:%d failed: next size %u != expected %zu\n",
772 __func__, __LINE__, cmd.next_size,
773 sizeof(struct msm_vfe_cfg_cmd_list));
776 if (++count >= MAX_ISP_REG_LIST) {
777 pr_err("%s:%d Error exceeding the max register count:%u\n",
778 __func__, __LINE__, count);
782 if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
783 sizeof(struct msm_vfe_cfg_cmd_list_32))) {
788 msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
789 rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
791 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
/*
 * msm_isp_proc_cmd_list() - ABI dispatcher: route 32-bit callers to the
 * compat list walker, everyone else to the native one.  The CONFIG_COMPAT
 * build checks is_compat_task() at runtime; the non-compat build is a
 * straight pass-through.
 */
798 static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
800 if (is_compat_task())
801 return msm_isp_proc_cmd_list_compat(vfe_dev, arg);
803 return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
805 #else /* CONFIG_COMPAT */
806 static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
808 return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
810 #endif /* CONFIG_COMPAT */
/*
 * msm_isp_ioctl_unlocked() - main V4L2 subdev ioctl dispatcher for the VFE.
 * Validates the device, then routes each command under the appropriate
 * lock: realtime_mutex for register-config paths, buf_mgr->lock for buffer
 * commands, core_mutex (plus the dual-VFE lock for stream/stats/axi ops)
 * for everything else.  AXI reset/restart additionally gate on the
 * overflow state so no HW reset happens while a halt is enforced.
 * NOTE(review): case labels, break statements and closing braces are
 * elided throughout this listing (e.g. gaps 824->828, 839->842, 905->910);
 * verify control flow against the full file.
 */
812 static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
813 unsigned int cmd, void *arg)
817 struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
819 if (!vfe_dev || !vfe_dev->vfe_base) {
820 pr_err("%s:%d failed: invalid params %pK\n",
821 __func__, __LINE__, vfe_dev);
823 pr_err("%s:%d failed %pK\n", __func__,
824 __LINE__, vfe_dev->vfe_base);
828 /* use real time mutex for hard real-time ioctls such as
829 * buffer operations and register updates.
830 * Use core mutex for other ioctls that could take
831 * longer time to complete such as start/stop ISP streams
832 * which blocks until the hardware start/stop streaming
834 ISP_DBG("%s: cmd: %d\n", __func__, _IOC_TYPE(cmd));
836 case VIDIOC_MSM_VFE_REG_CFG: {
837 mutex_lock(&vfe_dev->realtime_mutex);
838 rc = msm_isp_proc_cmd(vfe_dev, arg);
839 mutex_unlock(&vfe_dev->realtime_mutex);
842 case VIDIOC_MSM_VFE_REG_LIST_CFG: {
843 mutex_lock(&vfe_dev->realtime_mutex);
844 rc = msm_isp_proc_cmd_list(vfe_dev, arg);
845 mutex_unlock(&vfe_dev->realtime_mutex);
/* Buffer-manager commands share one handler under buf_mgr->lock. */
848 case VIDIOC_MSM_ISP_REQUEST_BUF:
849 case VIDIOC_MSM_ISP_REQUEST_BUF_VER2:
851 case VIDIOC_MSM_ISP_ENQUEUE_BUF:
853 case VIDIOC_MSM_ISP_DEQUEUE_BUF:
855 case VIDIOC_MSM_ISP_UNMAP_BUF: {
856 mutex_lock(&vfe_dev->buf_mgr->lock);
857 rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
858 mutex_unlock(&vfe_dev->buf_mgr->lock);
861 case VIDIOC_MSM_ISP_RELEASE_BUF: {
862 if (vfe_dev->buf_mgr == NULL) {
863 pr_err("%s: buf mgr NULL! rc = -1\n", __func__);
867 mutex_lock(&vfe_dev->buf_mgr->lock);
868 rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
869 mutex_unlock(&vfe_dev->buf_mgr->lock);
/* AXI stream lifecycle: core_mutex + dual-VFE lock. */
872 case VIDIOC_MSM_ISP_REQUEST_STREAM:
873 mutex_lock(&vfe_dev->core_mutex);
874 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
875 rc = msm_isp_request_axi_stream(vfe_dev, arg);
876 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
877 mutex_unlock(&vfe_dev->core_mutex);
879 case VIDIOC_MSM_ISP_RELEASE_STREAM:
880 mutex_lock(&vfe_dev->core_mutex);
881 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
882 rc = msm_isp_release_axi_stream(vfe_dev, arg);
883 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
884 mutex_unlock(&vfe_dev->core_mutex);
886 case VIDIOC_MSM_ISP_CFG_STREAM:
887 mutex_lock(&vfe_dev->core_mutex);
888 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
889 rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
890 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
891 mutex_unlock(&vfe_dev->core_mutex);
893 case VIDIOC_MSM_ISP_AXI_HALT:
894 mutex_lock(&vfe_dev->core_mutex);
895 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
896 rc = msm_isp_axi_halt(vfe_dev, arg);
897 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
898 mutex_unlock(&vfe_dev->core_mutex);
/* Reset/restart are refused while overflow handling enforces a halt. */
900 case VIDIOC_MSM_ISP_AXI_RESET:
901 mutex_lock(&vfe_dev->core_mutex);
902 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
903 if (atomic_read(&vfe_dev->error_info.overflow_state)
905 rc = msm_isp_stats_reset(vfe_dev);
906 rc2 = msm_isp_axi_reset(vfe_dev, arg);
910 pr_err_ratelimited("%s: no HW reset, halt enforced.\n",
913 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
914 mutex_unlock(&vfe_dev->core_mutex);
916 case VIDIOC_MSM_ISP_AXI_RESTART:
917 mutex_lock(&vfe_dev->core_mutex);
918 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
919 if (atomic_read(&vfe_dev->error_info.overflow_state)
921 rc = msm_isp_stats_restart(vfe_dev);
922 rc2 = msm_isp_axi_restart(vfe_dev, arg);
926 pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
929 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
930 mutex_unlock(&vfe_dev->core_mutex);
932 case VIDIOC_MSM_ISP_INPUT_CFG:
933 mutex_lock(&vfe_dev->core_mutex);
934 rc = msm_isp_cfg_input(vfe_dev, arg);
935 mutex_unlock(&vfe_dev->core_mutex);
937 case VIDIOC_MSM_ISP_AHB_CLK_CFG:
938 mutex_lock(&vfe_dev->core_mutex);
/* Optional HW op: only present on HW revisions that support it. */
939 if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
940 rc = vfe_dev->hw_info->vfe_ops.core_ops.
941 ahb_clk_cfg(vfe_dev, arg);
944 mutex_unlock(&vfe_dev->core_mutex);
946 case VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE:
947 mutex_lock(&vfe_dev->core_mutex);
948 rc = msm_isp_set_dual_HW_master_slave_mode(vfe_dev, arg);
949 mutex_unlock(&vfe_dev->core_mutex);
951 case VIDIOC_MSM_ISP_DUAL_HW_MASTER_SLAVE_SYNC:
952 mutex_lock(&vfe_dev->core_mutex);
953 rc = msm_isp_dual_hw_master_slave_sync(vfe_dev, arg);
954 mutex_unlock(&vfe_dev->core_mutex);
956 case VIDIOC_MSM_ISP_DUAL_HW_LPM_MODE:
957 mutex_lock(&vfe_dev->core_mutex);
958 rc = msm_isp_ab_ib_update_lpm_mode(vfe_dev, arg);
959 mutex_unlock(&vfe_dev->core_mutex);
961 case VIDIOC_MSM_ISP_FETCH_ENG_START:
962 case VIDIOC_MSM_ISP_MAP_BUF_START_FE:
963 mutex_lock(&vfe_dev->core_mutex);
964 rc = msm_isp_start_fetch_engine(vfe_dev, arg);
965 mutex_unlock(&vfe_dev->core_mutex);
968 case VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START:
969 case VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE:
970 mutex_lock(&vfe_dev->core_mutex);
971 rc = msm_isp_start_fetch_engine_multi_pass(vfe_dev, arg);
972 mutex_unlock(&vfe_dev->core_mutex);
974 case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
976 enum msm_vfe_input_src frame_src =
977 *((enum msm_vfe_input_src *)arg);
978 vfe_dev->hw_info->vfe_ops.core_ops.
979 reg_update(vfe_dev, frame_src);
982 case VIDIOC_MSM_ISP_SET_SRC_STATE:
983 mutex_lock(&vfe_dev->core_mutex);
984 rc = msm_isp_set_src_state(vfe_dev, arg);
985 mutex_unlock(&vfe_dev->core_mutex);
/* Stats stream lifecycle mirrors the AXI stream locking. */
987 case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
988 mutex_lock(&vfe_dev->core_mutex);
989 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
990 rc = msm_isp_request_stats_stream(vfe_dev, arg);
991 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
992 mutex_unlock(&vfe_dev->core_mutex);
994 case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
995 mutex_lock(&vfe_dev->core_mutex);
996 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
997 rc = msm_isp_release_stats_stream(vfe_dev, arg);
998 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
999 mutex_unlock(&vfe_dev->core_mutex);
1001 case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
1002 mutex_lock(&vfe_dev->core_mutex);
1003 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
1004 rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
1005 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
1006 mutex_unlock(&vfe_dev->core_mutex);
1008 case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
1009 mutex_lock(&vfe_dev->core_mutex);
1010 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
1011 rc = msm_isp_update_stats_stream(vfe_dev, arg);
1012 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
1013 mutex_unlock(&vfe_dev->core_mutex);
1015 case VIDIOC_MSM_ISP_UPDATE_STREAM:
1016 mutex_lock(&vfe_dev->core_mutex);
1017 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
1018 rc = msm_isp_update_axi_stream(vfe_dev, arg);
1019 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
1020 mutex_unlock(&vfe_dev->core_mutex);
1022 case VIDIOC_MSM_ISP_SMMU_ATTACH:
1023 mutex_lock(&vfe_dev->core_mutex);
1024 rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
1025 mutex_unlock(&vfe_dev->core_mutex);
/* Freeze notification: reset per-interface SOF debug counters. */
1027 case MSM_SD_NOTIFY_FREEZE:
1028 vfe_dev->isp_sof_debug = 0;
1029 vfe_dev->isp_raw0_debug = 0;
1030 vfe_dev->isp_raw1_debug = 0;
1031 vfe_dev->isp_raw2_debug = 0;
1033 case MSM_SD_UNNOTIFY_FREEZE:
1034 case MSM_SD_SHUTDOWN:
1038 pr_err_ratelimited("%s: Invalid ISP command %d\n", __func__,
1046 #ifdef CONFIG_COMPAT
/*
 * msm_isp_ioctl_compat() - 32-bit-userspace ioctl front end.  Handles the
 * two compat register-config commands (converting the 32-bit structs with
 * msm_isp_compat_to_proc_cmd / the compat list walker) under
 * realtime_mutex; every other command falls through to
 * msm_isp_ioctl_unlocked().
 * NOTE(review): the error return after the device check and the break
 * statements are elided in this listing (gaps 1058->1063, 1069->1072,
 * 1075->1079).
 */
1047 static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
1048 unsigned int cmd, void *arg)
1050 struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
1053 if (!vfe_dev || !vfe_dev->vfe_base) {
1054 pr_err("%s:%d failed: invalid params %pK\n",
1055 __func__, __LINE__, vfe_dev);
1057 pr_err("%s:%d failed %pK\n", __func__,
1058 __LINE__, vfe_dev->vfe_base);
1063 case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
1064 struct msm_vfe_cfg_cmd2 proc_cmd;
1065 mutex_lock(&vfe_dev->realtime_mutex);
1066 msm_isp_compat_to_proc_cmd(&proc_cmd,
1067 (struct msm_vfe_cfg_cmd2_32 *) arg);
1068 rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
1069 mutex_unlock(&vfe_dev->realtime_mutex);
1072 case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
1073 mutex_lock(&vfe_dev->realtime_mutex);
1074 rc = msm_isp_proc_cmd_list(vfe_dev, arg);
1075 mutex_unlock(&vfe_dev->realtime_mutex);
/* Non-compat-specific commands: delegate to the main dispatcher. */
1079 return msm_isp_ioctl_unlocked(sd, cmd, arg);
/*
 * msm_isp_ioctl() - exported ioctl entry point.  With CONFIG_COMPAT it
 * routes through msm_isp_ioctl_compat (which itself falls back to the
 * unlocked handler); without it, straight to msm_isp_ioctl_unlocked.
 */
1085 long msm_isp_ioctl(struct v4l2_subdev *sd,
1086 unsigned int cmd, void *arg)
1088 return msm_isp_ioctl_compat(sd, cmd, arg);
1090 #else /* CONFIG_COMPAT */
1091 long msm_isp_ioctl(struct v4l2_subdev *sd,
1092 unsigned int cmd, void *arg)
1094 return msm_isp_ioctl_unlocked(sd, cmd, arg);
1096 #endif /* CONFIG_COMPAT */
/*
 * msm_isp_send_hw_cmd - validate and execute one user-supplied register
 * config command against the VFE register space / DMI tables.
 *
 * @vfe_dev:     target VFE device (vfe_base / vfe_base_size used for bounds)
 * @reg_cfg_cmd: one command (type + offsets/lengths), already copied from user
 * @cfg_data:    kernel copy of the user payload buffer
 * @cmd_len:     length in bytes of @cfg_data
 *
 * All offsets/lengths originate from userspace, so every branch below
 * bounds-checks against vfe_base_size or cmd_len (with explicit UINT_MAX
 * checks to catch addition overflow) before touching hardware.
 * NOTE(review): this listing is elided (original line numbers are
 * non-contiguous), so returns/braces between the visible lines are not
 * shown here.
 */
1098 static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
1099 struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
1100 uint32_t *cfg_data, uint32_t cmd_len)
/* Reject null device/command up front. */
1102 if (!vfe_dev || !reg_cfg_cmd) {
1103 pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
1104 __LINE__, vfe_dev, reg_cfg_cmd);
/* Every command type except VFE_CFG_MASK carries a payload. */
1107 if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
1108 (!cfg_data || !cmd_len)) {
1109 pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
1110 __func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
1115 /* Validate input parameters */
1116 switch (reg_cfg_cmd->cmd_type) {
1119 case VFE_WRITE_MB: {
/* reg_offset+len must not overflow and must stay inside the mapped
 * VFE register window; offset must be 4-byte aligned. */
1120 if ((reg_cfg_cmd->u.rw_info.reg_offset >
1121 (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1122 ((reg_cfg_cmd->u.rw_info.reg_offset +
1123 reg_cfg_cmd->u.rw_info.len) >
1124 vfe_dev->vfe_base_size) ||
1125 (reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
1126 pr_err_ratelimited("%s:%d regoffset %d len %d res %d\n",
1128 reg_cfg_cmd->u.rw_info.reg_offset,
1129 reg_cfg_cmd->u.rw_info.len,
1130 (uint32_t)vfe_dev->vfe_base_size)
/* The source region inside cfg_data must also fit within cmd_len. */
1134 if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
1135 (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1136 ((reg_cfg_cmd->u.rw_info.cmd_data_offset +
1137 reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
1138 pr_err_ratelimited("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
1140 reg_cfg_cmd->u.rw_info.cmd_data_offset,
1141 reg_cfg_cmd->u.rw_info.len, cmd_len);
1147 case VFE_WRITE_DMI_16BIT:
1148 case VFE_WRITE_DMI_32BIT:
1149 case VFE_WRITE_DMI_64BIT:
1150 case VFE_READ_DMI_16BIT:
1151 case VFE_READ_DMI_32BIT:
1152 case VFE_READ_DMI_64BIT: {
/* 64-bit DMI interleaves hi/lo words: hi table must sit exactly one
 * uint32_t after the lo table. */
1153 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
1154 reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1155 if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
1156 reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
1157 (reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
1158 reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
1159 (sizeof(uint32_t)))) {
/* NOTE(review): format says "hi %d lo %d" but hi_tbl_offset is passed
 * for BOTH arguments; second should likely be lo_tbl_offset. */
1160 pr_err("%s:%d hi %d lo %d\n",
1162 reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1163 reg_cfg_cmd->u.dmi_info.hi_tbl_offset);
1166 if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
1167 pr_err("%s:%d len %d\n",
1169 reg_cfg_cmd->u.dmi_info.len);
/* hi table bounds check against cmd_len (overflow-safe form). */
1173 reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
1174 (reg_cfg_cmd->u.dmi_info.len -
1175 sizeof(uint32_t))) ||
1176 ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
1177 reg_cfg_cmd->u.dmi_info.len -
1178 sizeof(uint32_t)) > cmd_len)) {
1179 pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
1181 reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1182 reg_cfg_cmd->u.dmi_info.len, cmd_len);
/* lo table bounds check against cmd_len. */
1186 if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
1187 (UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
1188 ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
1189 reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
1190 pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
1192 reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
1193 reg_cfg_cmd->u.dmi_info.len, cmd_len);
/* Validation passed: second switch actually performs the operation. */
1203 switch (reg_cfg_cmd->cmd_type) {
1205 msm_camera_io_memcpy(vfe_dev->vfe_base +
1206 reg_cfg_cmd->u.rw_info.reg_offset,
1207 cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
1208 reg_cfg_cmd->u.rw_info.len);
1211 case VFE_WRITE_MB: {
/* _mb variant adds a memory barrier around the register copy. */
1212 msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
1213 reg_cfg_cmd->u.rw_info.reg_offset,
1214 cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
1215 reg_cfg_cmd->u.rw_info.len);
1218 case VFE_CFG_MASK: {
/* Read-modify-write of one register under mask; bounds re-checked here
 * because VFE_CFG_MASK skips the payload validation above. */
1221 unsigned long flags;
1222 if ((UINT_MAX - sizeof(temp) <
1223 reg_cfg_cmd->u.mask_info.reg_offset) ||
1224 (vfe_dev->vfe_base_size <
1225 reg_cfg_cmd->u.mask_info.reg_offset +
1227 (reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
1228 pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
/* Some module-config registers need the shared-data spinlock. */
1231 grab_lock = vfe_dev->hw_info->vfe_ops.core_ops.
1232 is_module_cfg_lock_needed(reg_cfg_cmd->
1233 u.mask_info.reg_offset);
1235 spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
1236 temp = msm_camera_io_r(vfe_dev->vfe_base +
1237 reg_cfg_cmd->u.mask_info.reg_offset);
1239 temp &= ~reg_cfg_cmd->u.mask_info.mask;
1240 temp |= reg_cfg_cmd->u.mask_info.val;
1241 msm_camera_io_w(temp, vfe_dev->vfe_base +
1242 reg_cfg_cmd->u.mask_info.reg_offset);
1244 spin_unlock_irqrestore(&vfe_dev->shared_data_lock,
1248 case VFE_WRITE_DMI_16BIT:
1249 case VFE_WRITE_DMI_32BIT:
1250 case VFE_WRITE_DMI_64BIT: {
1252 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1253 uint32_t hi_val, lo_val, lo_val1;
1254 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
1255 hi_tbl_ptr = cfg_data +
1256 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1258 lo_tbl_ptr = cfg_data +
1259 reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
/* 64-bit entries consume two words per element, so halve len.
 * NOTE(review): this mutates reg_cfg_cmd->u.dmi_info.len in place. */
1260 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
1261 reg_cfg_cmd->u.dmi_info.len =
1262 reg_cfg_cmd->u.dmi_info.len / 2;
1263 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1264 lo_val = *lo_tbl_ptr++;
1265 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
/* 16-bit mode: one 32-bit word carries two table entries. */
1266 lo_val1 = lo_val & 0x0000FFFF;
1267 lo_val = (lo_val & 0xFFFF0000)>>16;
1268 msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
1269 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1270 } else if (reg_cfg_cmd->cmd_type ==
1271 VFE_WRITE_DMI_64BIT) {
/* hi word must be written before lo (hw latches on lo write). */
1273 hi_val = *hi_tbl_ptr;
1274 hi_tbl_ptr = hi_tbl_ptr + 2;
1275 msm_camera_io_w(hi_val, vfe_dev->vfe_base +
1276 vfe_dev->hw_info->dmi_reg_offset);
1278 msm_camera_io_w(lo_val, vfe_dev->vfe_base +
1279 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1283 case VFE_READ_DMI_16BIT:
1284 case VFE_READ_DMI_32BIT:
1285 case VFE_READ_DMI_64BIT: {
/* Mirror of the write path: read DMI entries back into cfg_data. */
1287 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1288 uint32_t hi_val, lo_val, lo_val1;
1289 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1290 hi_tbl_ptr = cfg_data +
1291 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1294 lo_tbl_ptr = cfg_data +
1295 reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
1297 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
1298 reg_cfg_cmd->u.dmi_info.len =
1299 reg_cfg_cmd->u.dmi_info.len / 2;
1301 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1302 lo_val = msm_camera_io_r(vfe_dev->vfe_base +
1303 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1305 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
1306 lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
1307 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1308 lo_val |= lo_val1 << 16;
1310 *lo_tbl_ptr++ = lo_val;
1311 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1312 hi_val = msm_camera_io_r(vfe_dev->vfe_base +
1313 vfe_dev->hw_info->dmi_reg_offset);
1314 *hi_tbl_ptr = hi_val;
1321 case VFE_HW_UPDATE_LOCK: {
/* Only grant the HW-update lock if still in the frame the caller saw
 * (*cfg_data) and no update was already taken this frame. */
1322 uint32_t update_id =
1323 vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
1324 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
1325 || update_id == *cfg_data) {
1326 pr_err("%s hw update lock failed acq %d, cur id %u, last id %u\n",
1329 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
1335 case VFE_HW_UPDATE_UNLOCK: {
/* Detect an update that straddled a frame boundary. */
1336 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
1338 pr_err("hw update across frame boundary,begin id %u, end id %d\n",
1340 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
1342 vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
1343 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
/* Register read path: copy consecutive registers into cfg_data,
 * re-checking the destination pointer stays inside the buffer. */
1348 uint32_t *data_ptr = cfg_data +
1349 reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
1350 for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
1351 if ((data_ptr < cfg_data) ||
1352 (UINT_MAX / sizeof(*data_ptr) <
1353 (data_ptr - cfg_data)) ||
1354 (sizeof(*data_ptr) * (data_ptr - cfg_data) >=
1357 *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
1358 reg_cfg_cmd->u.rw_info.reg_offset);
1359 reg_cfg_cmd->u.rw_info.reg_offset += 4;
1363 case GET_MAX_CLK_RATE: {
/* Return the platform's max VFE clock rate as a __u32 in cfg_data. */
1367 if (cmd_len != sizeof(__u32)) {
1368 pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1369 __func__, __LINE__, cmd_len,
1373 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_max_clk_rate(
1376 pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
1380 *(__u32 *)cfg_data = (__u32)rate;
1384 case GET_CLK_RATES: {
/* Return svs/nominal/high clock rates into the user struct. */
1386 struct msm_isp_clk_rates rates;
1387 struct msm_isp_clk_rates *user_data =
1388 (struct msm_isp_clk_rates *)cfg_data;
1389 if (cmd_len != sizeof(struct msm_isp_clk_rates)) {
1390 pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1391 __func__, __LINE__, cmd_len,
1392 sizeof(struct msm_isp_clk_rates));
1395 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(
1398 pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
1401 user_data->svs_rate = rates.svs_rate;
1402 user_data->nominal_rate = rates.nominal_rate;
1403 user_data->high_rate = rates.high_rate;
/* GET_ISP_ID path: report which VFE instance this is. */
1407 uint32_t *isp_id = NULL;
1409 if (cmd_len < sizeof(uint32_t)) {
1410 pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1411 __func__, __LINE__, cmd_len,
1416 isp_id = (uint32_t *)cfg_data;
1417 *isp_id = vfe_dev->pdev->id;
1420 case SET_WM_UB_SIZE:
1422 case SET_UB_POLICY: {
/* Store the write-master unified-buffer policy for later use. */
1424 if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
1425 pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1426 __func__, __LINE__, cmd_len,
1427 sizeof(vfe_dev->vfe_ub_policy));
1430 vfe_dev->vfe_ub_policy = *cfg_data;
1433 case GET_VFE_HW_LIMIT: {
1434 uint32_t *hw_limit = NULL;
1436 if (cmd_len < sizeof(uint32_t)) {
1437 pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1438 __func__, __LINE__, cmd_len,
1443 hw_limit = (uint32_t *)cfg_data;
1444 *hw_limit = vfe_dev->vfe_hw_limit;
/*
 * msm_isp_proc_cmd - ioctl backend: copy an array of register-config
 * commands plus one shared payload buffer from userspace, run each
 * command through msm_isp_send_hw_cmd(), and copy the (possibly
 * updated) payload back to the caller.
 *
 * NOTE(review): the kzalloc size is sizeof(cmd) * num_cfg with
 * user-controlled num_cfg — multiplication overflow should be ruled out
 * by the caller or via kcalloc(); confirm an upper bound on num_cfg
 * (e.g. MAX_ISP_REG_LIST) is enforced in the elided lines.
 * NOTE(review): listing is elided; error paths (reg_cfg_failed /
 * cfg_data_failed / copy_cmd_failed labels, kfree calls) are not shown.
 */
1451 int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
1454 struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
1455 struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
1456 uint32_t *cfg_data = NULL;
1458 if (!proc_cmd->num_cfg) {
1459 pr_err("%s: Passed num_cfg as 0\n", __func__);
1463 reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
1464 proc_cmd->num_cfg, GFP_KERNEL);
1466 pr_err("%s: reg_cfg alloc failed\n", __func__);
1468 goto reg_cfg_failed;
/* Pull the command array from userspace. */
1471 if (copy_from_user(reg_cfg_cmd,
1472 (void __user *)(proc_cmd->cfg_cmd),
1473 sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
1475 goto copy_cmd_failed;
/* Payload buffer is optional (cmd_len may be 0 for mask-only cmds). */
1478 if (proc_cmd->cmd_len > 0) {
1479 cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
1481 pr_err("%s: cfg_data alloc failed\n", __func__);
1483 goto cfg_data_failed;
1486 if (copy_from_user(cfg_data,
1487 (void __user *)(proc_cmd->cfg_data),
1488 proc_cmd->cmd_len)) {
1490 goto copy_cmd_failed;
/* NOTE(review): "®_cfg_cmd[i]" below is a character-encoding artifact
 * of "&reg_cfg_cmd[i]" — the source bytes should be restored. */
1494 for (i = 0; i < proc_cmd->num_cfg; i++)
1495 rc = msm_isp_send_hw_cmd(vfe_dev, ®_cfg_cmd[i],
1496 cfg_data, proc_cmd->cmd_len);
/* Read-type commands update cfg_data in place; hand it back. */
1498 if (copy_to_user(proc_cmd->cfg_data,
1499 cfg_data, proc_cmd->cmd_len)) {
1501 goto copy_cmd_failed;
/*
 * msm_isp_send_event - deliver an ISP event to userspace listeners.
 *
 * Packs @event_data into the v4l2_event payload area (u.data) and
 * queues it on this VFE's subdev device node; userspace receives it
 * via VIDIOC_DQEVENT.
 */
1512 int msm_isp_send_event(struct vfe_device *vfe_dev,
1513 uint32_t event_type,
1514 struct msm_isp_event_data *event_data)
1516 struct v4l2_event isp_event;
1517 memset(&isp_event, 0, sizeof(struct v4l2_event));
1519 isp_event.type = event_type;
/* msm_isp_event_data must fit in the 64-byte v4l2_event data area. */
1520 memcpy(&isp_event.u.data[0], event_data,
1521 sizeof(struct msm_isp_event_data));
1522 v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
/* CAL_WORD(width, M, N): ceil(width * M / N) — converts a pixel count
 * to bus words, where M/N encodes bits-per-pixel over word size. */
1526 #define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
/*
 * msm_isp_cal_word_per_line - number of AXI bus words needed for one
 * line of @pixel_per_line pixels in the given V4L2 fourcc format.
 * Unknown formats log an error (return value in the elided default
 * path is not visible in this listing).
 */
1528 int msm_isp_cal_word_per_line(uint32_t output_format,
1529 uint32_t pixel_per_line)
1532 switch (output_format) {
/* 8-bit formats: 1 byte per pixel, 8 pixels per 64-bit word. */
1533 case V4L2_PIX_FMT_SBGGR8:
1534 case V4L2_PIX_FMT_SGBRG8:
1535 case V4L2_PIX_FMT_SGRBG8:
1536 case V4L2_PIX_FMT_SRGGB8:
1537 case V4L2_PIX_FMT_QBGGR8:
1538 case V4L2_PIX_FMT_QGBRG8:
1539 case V4L2_PIX_FMT_QGRBG8:
1540 case V4L2_PIX_FMT_QRGGB8:
1541 case V4L2_PIX_FMT_JPEG:
1542 case V4L2_PIX_FMT_META:
1543 case V4L2_PIX_FMT_GREY:
1544 val = CAL_WORD(pixel_per_line, 1, 8);
/* Packed 10-bit formats: 5 bytes per 4 pixels -> 5/32 words/pixel. */
1546 case V4L2_PIX_FMT_SBGGR10:
1547 case V4L2_PIX_FMT_SGBRG10:
1548 case V4L2_PIX_FMT_SGRBG10:
1549 case V4L2_PIX_FMT_SRGGB10:
1550 case V4L2_PIX_FMT_SBGGR10DPCM6:
1551 case V4L2_PIX_FMT_SGBRG10DPCM6:
1552 case V4L2_PIX_FMT_SGRBG10DPCM6:
1553 case V4L2_PIX_FMT_SRGGB10DPCM6:
1554 case V4L2_PIX_FMT_SBGGR10DPCM8:
1555 case V4L2_PIX_FMT_SGBRG10DPCM8:
1556 case V4L2_PIX_FMT_SGRBG10DPCM8:
1557 case V4L2_PIX_FMT_SRGGB10DPCM8:
1558 case V4L2_PIX_FMT_META10:
1559 val = CAL_WORD(pixel_per_line, 5, 32);
/* Packed 12-bit formats. */
1561 case V4L2_PIX_FMT_SBGGR12:
1562 case V4L2_PIX_FMT_SGBRG12:
1563 case V4L2_PIX_FMT_SGRBG12:
1564 case V4L2_PIX_FMT_SRGGB12:
1565 val = CAL_WORD(pixel_per_line, 3, 16);
/* Packed 14-bit formats. */
1567 case V4L2_PIX_FMT_SBGGR14:
1568 case V4L2_PIX_FMT_SGBRG14:
1569 case V4L2_PIX_FMT_SGRBG14:
1570 case V4L2_PIX_FMT_SRGGB14:
1571 val = CAL_WORD(pixel_per_line, 7, 32);
/* Q (unpacked-to-byte-boundary) 10/12/14-bit variants. */
1573 case V4L2_PIX_FMT_QBGGR10:
1574 case V4L2_PIX_FMT_QGBRG10:
1575 case V4L2_PIX_FMT_QGRBG10:
1576 case V4L2_PIX_FMT_QRGGB10:
1577 val = CAL_WORD(pixel_per_line, 1, 6);
1579 case V4L2_PIX_FMT_QBGGR12:
1580 case V4L2_PIX_FMT_QGBRG12:
1581 case V4L2_PIX_FMT_QGRBG12:
1582 case V4L2_PIX_FMT_QRGGB12:
1583 val = CAL_WORD(pixel_per_line, 1, 5);
1585 case V4L2_PIX_FMT_QBGGR14:
1586 case V4L2_PIX_FMT_QGBRG14:
1587 case V4L2_PIX_FMT_QGRBG14:
1588 case V4L2_PIX_FMT_QRGGB14:
1589 val = CAL_WORD(pixel_per_line, 1, 4);
/* Planar/semi-planar YUV: luma plane, 1 byte per pixel. */
1591 case V4L2_PIX_FMT_NV12:
1592 case V4L2_PIX_FMT_NV21:
1593 case V4L2_PIX_FMT_NV14:
1594 case V4L2_PIX_FMT_NV41:
1595 case V4L2_PIX_FMT_NV16:
1596 case V4L2_PIX_FMT_NV61:
1597 val = CAL_WORD(pixel_per_line, 1, 8);
/* Interleaved YUV 4:2:2: 2 bytes per pixel. */
1599 case V4L2_PIX_FMT_YUYV:
1600 case V4L2_PIX_FMT_YVYU:
1601 case V4L2_PIX_FMT_UYVY:
1602 case V4L2_PIX_FMT_VYUY:
1603 val = CAL_WORD(pixel_per_line, 2, 8);
/* Plain 16-bit-per-pixel 10-bit formats. */
1605 case V4L2_PIX_FMT_P16BGGR10:
1606 case V4L2_PIX_FMT_P16GBRG10:
1607 case V4L2_PIX_FMT_P16GRBG10:
1608 case V4L2_PIX_FMT_P16RGGB10:
1609 val = CAL_WORD(pixel_per_line, 1, 4);
1611 case V4L2_PIX_FMT_NV24:
1612 case V4L2_PIX_FMT_NV42:
1613 val = CAL_WORD(pixel_per_line, 1, 8);
1615 /*TD: Add more image format*/
1617 msm_isp_print_fourcc_error(__func__, output_format);
/*
 * msm_isp_get_pack_format - map a V4L2 fourcc to the VFE output packing
 * mode. The groups below share one (elided) return each: MIPI-packed
 * Bayer formats, then Q/plain-16 unpacked formats; unknown formats log
 * an error.
 */
1623 enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
1625 switch (output_format) {
/* MIPI-packed Bayer 8/10/12/14-bit (incl. DPCM-compressed 10-bit). */
1626 case V4L2_PIX_FMT_SBGGR8:
1627 case V4L2_PIX_FMT_SGBRG8:
1628 case V4L2_PIX_FMT_SGRBG8:
1629 case V4L2_PIX_FMT_SRGGB8:
1630 case V4L2_PIX_FMT_SBGGR10:
1631 case V4L2_PIX_FMT_SGBRG10:
1632 case V4L2_PIX_FMT_SGRBG10:
1633 case V4L2_PIX_FMT_SRGGB10:
1634 case V4L2_PIX_FMT_SBGGR10DPCM6:
1635 case V4L2_PIX_FMT_SGBRG10DPCM6:
1636 case V4L2_PIX_FMT_SGRBG10DPCM6:
1637 case V4L2_PIX_FMT_SRGGB10DPCM6:
1638 case V4L2_PIX_FMT_SBGGR10DPCM8:
1639 case V4L2_PIX_FMT_SGBRG10DPCM8:
1640 case V4L2_PIX_FMT_SGRBG10DPCM8:
1641 case V4L2_PIX_FMT_SRGGB10DPCM8:
1642 case V4L2_PIX_FMT_SBGGR12:
1643 case V4L2_PIX_FMT_SGBRG12:
1644 case V4L2_PIX_FMT_SGRBG12:
1645 case V4L2_PIX_FMT_SRGGB12:
1646 case V4L2_PIX_FMT_SBGGR14:
1647 case V4L2_PIX_FMT_SGBRG14:
1648 case V4L2_PIX_FMT_SGRBG14:
1649 case V4L2_PIX_FMT_SRGGB14:
/* Q (QCOM unpacked) Bayer variants. */
1651 case V4L2_PIX_FMT_QBGGR8:
1652 case V4L2_PIX_FMT_QGBRG8:
1653 case V4L2_PIX_FMT_QGRBG8:
1654 case V4L2_PIX_FMT_QRGGB8:
1655 case V4L2_PIX_FMT_QBGGR10:
1656 case V4L2_PIX_FMT_QGBRG10:
1657 case V4L2_PIX_FMT_QGRBG10:
1658 case V4L2_PIX_FMT_QRGGB10:
1659 case V4L2_PIX_FMT_QBGGR12:
1660 case V4L2_PIX_FMT_QGBRG12:
1661 case V4L2_PIX_FMT_QGRBG12:
1662 case V4L2_PIX_FMT_QRGGB12:
1663 case V4L2_PIX_FMT_QBGGR14:
1664 case V4L2_PIX_FMT_QGBRG14:
1665 case V4L2_PIX_FMT_QGRBG14:
1666 case V4L2_PIX_FMT_QRGGB14:
/* Plain16 10-bit variants. */
1668 case V4L2_PIX_FMT_P16BGGR10:
1669 case V4L2_PIX_FMT_P16GBRG10:
1670 case V4L2_PIX_FMT_P16GRBG10:
1671 case V4L2_PIX_FMT_P16RGGB10:
1674 msm_isp_print_fourcc_error(__func__, output_format);
/*
 * msm_isp_get_bit_per_pixel - bits-per-pixel for a V4L2 fourcc.
 * Cases are grouped by depth; the per-group return statements are not
 * visible in this elided listing.
 * NOTE(review): the default path both calls msm_isp_print_fourcc_error()
 * and pr_err()s — the second message is redundant.
 */
1680 int msm_isp_get_bit_per_pixel(uint32_t output_format)
1682 switch (output_format) {
1683 case V4L2_PIX_FMT_Y4:
1685 case V4L2_PIX_FMT_Y6:
/* 8-bit group. */
1687 case V4L2_PIX_FMT_SBGGR8:
1688 case V4L2_PIX_FMT_SGBRG8:
1689 case V4L2_PIX_FMT_SGRBG8:
1690 case V4L2_PIX_FMT_SRGGB8:
1691 case V4L2_PIX_FMT_QBGGR8:
1692 case V4L2_PIX_FMT_QGBRG8:
1693 case V4L2_PIX_FMT_QGRBG8:
1694 case V4L2_PIX_FMT_QRGGB8:
1695 case V4L2_PIX_FMT_JPEG:
1696 case V4L2_PIX_FMT_META:
1697 case V4L2_PIX_FMT_NV12:
1698 case V4L2_PIX_FMT_NV21:
1699 case V4L2_PIX_FMT_NV14:
1700 case V4L2_PIX_FMT_NV41:
1701 case V4L2_PIX_FMT_YVU410:
1702 case V4L2_PIX_FMT_YVU420:
1703 case V4L2_PIX_FMT_YUYV:
1704 case V4L2_PIX_FMT_YYUV:
1705 case V4L2_PIX_FMT_YVYU:
1706 case V4L2_PIX_FMT_UYVY:
1707 case V4L2_PIX_FMT_VYUY:
1708 case V4L2_PIX_FMT_YUV422P:
1709 case V4L2_PIX_FMT_YUV411P:
1710 case V4L2_PIX_FMT_Y41P:
1711 case V4L2_PIX_FMT_YUV444:
1712 case V4L2_PIX_FMT_YUV555:
1713 case V4L2_PIX_FMT_YUV565:
1714 case V4L2_PIX_FMT_YUV32:
1715 case V4L2_PIX_FMT_YUV410:
1716 case V4L2_PIX_FMT_YUV420:
1717 case V4L2_PIX_FMT_GREY:
1718 case V4L2_PIX_FMT_PAL8:
1719 case V4L2_PIX_FMT_UV8:
1720 case MSM_V4L2_PIX_FMT_META:
/* 10-bit group. */
1722 case V4L2_PIX_FMT_SBGGR10:
1723 case V4L2_PIX_FMT_SGBRG10:
1724 case V4L2_PIX_FMT_SGRBG10:
1725 case V4L2_PIX_FMT_SRGGB10:
1726 case V4L2_PIX_FMT_SBGGR10DPCM6:
1727 case V4L2_PIX_FMT_SGBRG10DPCM6:
1728 case V4L2_PIX_FMT_SGRBG10DPCM6:
1729 case V4L2_PIX_FMT_SRGGB10DPCM6:
1730 case V4L2_PIX_FMT_SBGGR10DPCM8:
1731 case V4L2_PIX_FMT_SGBRG10DPCM8:
1732 case V4L2_PIX_FMT_SGRBG10DPCM8:
1733 case V4L2_PIX_FMT_SRGGB10DPCM8:
1734 case V4L2_PIX_FMT_QBGGR10:
1735 case V4L2_PIX_FMT_QGBRG10:
1736 case V4L2_PIX_FMT_QGRBG10:
1737 case V4L2_PIX_FMT_QRGGB10:
1738 case V4L2_PIX_FMT_Y10:
1739 case V4L2_PIX_FMT_Y10BPACK:
1740 case V4L2_PIX_FMT_P16BGGR10:
1741 case V4L2_PIX_FMT_P16GBRG10:
1742 case V4L2_PIX_FMT_P16GRBG10:
1743 case V4L2_PIX_FMT_P16RGGB10:
1744 case V4L2_PIX_FMT_META10:
1745 case MSM_V4L2_PIX_FMT_META10:
/* 12-bit group. */
1747 case V4L2_PIX_FMT_SBGGR12:
1748 case V4L2_PIX_FMT_SGBRG12:
1749 case V4L2_PIX_FMT_SGRBG12:
1750 case V4L2_PIX_FMT_SRGGB12:
1751 case V4L2_PIX_FMT_QBGGR12:
1752 case V4L2_PIX_FMT_QGBRG12:
1753 case V4L2_PIX_FMT_QGRBG12:
1754 case V4L2_PIX_FMT_QRGGB12:
1755 case V4L2_PIX_FMT_Y12:
/* 14-bit group. */
1757 case V4L2_PIX_FMT_SBGGR14:
1758 case V4L2_PIX_FMT_SGBRG14:
1759 case V4L2_PIX_FMT_SGRBG14:
1760 case V4L2_PIX_FMT_SRGGB14:
1761 case V4L2_PIX_FMT_QBGGR14:
1762 case V4L2_PIX_FMT_QGBRG14:
1763 case V4L2_PIX_FMT_QGRBG14:
1764 case V4L2_PIX_FMT_QRGGB14:
/* 16-bit group. */
1766 case V4L2_PIX_FMT_NV16:
1767 case V4L2_PIX_FMT_NV61:
1768 case V4L2_PIX_FMT_Y16:
1770 case V4L2_PIX_FMT_NV24:
1771 case V4L2_PIX_FMT_NV42:
1773 /*TD: Add more image format*/
1775 msm_isp_print_fourcc_error(__func__, output_format);
1776 pr_err("%s: Invalid output format %x\n",
1777 __func__, output_format);
/*
 * msm_isp_update_error_frame_count - bump the per-device counter used
 * to rate-limit error-status dumps (consumed by
 * msm_isp_process_error_info, which dumps every 100th frame).
 */
1782 void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
1784 struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1785 error_info->info_dump_frame_count++;
/*
 * msm_isp_process_iommu_page_fault - IOMMU page-fault recovery.
 *
 * If a bus overflow is pending (or recovery already in progress) the
 * fault is treated as a side effect of the overflow and a non-fatal
 * recovery is attempted; otherwise the AXI bridge is halted and a
 * fatal IOMMU_P_FAULT event is sent. On first fault, buffer-manager
 * debug info and WM ping/pong addresses are dumped, then further dumps
 * are disabled.
 */
1789 static int msm_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
1791 int rc = vfe_dev->buf_mgr->pagefault_debug_disable;
1792 uint32_t irq_status0, irq_status1;
1793 uint32_t overflow_mask;
1794 unsigned long irq_flags;
1796 /* Check if any overflow bit is set */
1797 vfe_dev->hw_info->vfe_ops.core_ops.
1798 get_overflow_mask(&overflow_mask);
1799 vfe_dev->hw_info->vfe_ops.irq_ops.
1800 read_irq_status(vfe_dev, &irq_status0, &irq_status1);
1801 overflow_mask &= irq_status1;
/* Overflow state is shared across VFEs: inspect it under the common
 * device lock. */
1803 &vfe_dev->common_data->common_dev_data_lock, irq_flags);
1804 if (overflow_mask ||
1805 atomic_read(&vfe_dev->error_info.overflow_state) !=
1807 spin_unlock_irqrestore(
1808 &vfe_dev->common_data->common_dev_data_lock, irq_flags);
1809 pr_err_ratelimited("%s: overflow detected during IOMMU\n",
1811 /* Don't treat the Overflow + Page fault scenario as fatal.
1812 * Instead try to do a recovery. Using an existing event as
1813 * as opposed to creating a new event.
1815 msm_isp_halt_send_error(vfe_dev, ISP_EVENT_PING_PONG_MISMATCH);
1817 spin_unlock_irqrestore(
1818 &vfe_dev->common_data->common_dev_data_lock, irq_flags);
1819 pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %pK\n",
1820 __func__, __LINE__, vfe_dev->pdev->id, vfe_dev);
/* Pure page fault: stop AXI traffic and report as fatal. */
1821 vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0);
1822 msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);
/* One-shot debug dump; disabled after the first fault. */
1825 if (vfe_dev->buf_mgr->pagefault_debug_disable == 0) {
1826 vfe_dev->buf_mgr->pagefault_debug_disable = 1;
1827 vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
1828 vfe_dev->page_fault_addr);
1829 msm_isp_print_ping_pong_address(vfe_dev,
1830 vfe_dev->page_fault_addr);
1831 vfe_dev->hw_info->vfe_ops.axi_ops.
1832 read_wm_ping_pong_addr(vfe_dev);
/*
 * msm_isp_process_error_info - rate-limited dump of accumulated error
 * state: dump on the first error of a burst and every 100 frames
 * thereafter, then clear the latched masks/statuses.
 */
1837 void msm_isp_process_error_info(struct vfe_device *vfe_dev)
1839 struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1841 if (error_info->error_count == 1 ||
1842 !(error_info->info_dump_frame_count % 100)) {
1843 vfe_dev->hw_info->vfe_ops.core_ops.
1844 process_error_status(vfe_dev);
/* Reset latched state so the next burst is reported afresh. */
1845 error_info->error_mask0 = 0;
1846 error_info->error_mask1 = 0;
1847 error_info->camif_status = 0;
1848 error_info->violation_status = 0;
/*
 * msm_isp_update_error_info - latch new error IRQ bits into the
 * per-device error masks and count an occurrence; consumed later by
 * msm_isp_process_error_info().
 */
1852 static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
1853 uint32_t error_mask0, uint32_t error_mask1)
1855 vfe_dev->error_info.error_mask0 |= error_mask0;
1856 vfe_dev->error_info.error_mask1 |= error_mask1;
1857 vfe_dev->error_info.error_count++;
/*
 * msm_isp_process_overflow_irq - detect a bus-overflow IRQ and start
 * recovery.
 *
 * Called from the hard IRQ path with the freshly read status words.
 * On overflow: atomically claims the recovery (cmpxchg on
 * overflow_state), masks IRQs down to halt/restart bits, halts the AXI
 * bridge (on both VFEs when split), and sends an
 * ISP_ERROR_BUS_OVERFLOW event. @force_overflow lets callers trigger
 * the same path without a set status bit.
 * Returns nonzero when recovery was initiated (elided returns not
 * visible here).
 */
1860 int msm_isp_process_overflow_irq(
1861 struct vfe_device *vfe_dev,
1862 uint32_t *irq_status0, uint32_t *irq_status1,
1863 uint8_t force_overflow)
1865 uint32_t overflow_mask;
1866 uint32_t bus_err = 0;
1867 unsigned long flags;
1869 /* if there are no active streams - do not start recovery */
1870 if (!vfe_dev->axi_data.num_active_stream)
1873 if (vfe_dev->hw_info->vfe_ops.core_ops.
1875 vfe_dev->hw_info->vfe_ops.core_ops.get_bus_err_mask(
1876 vfe_dev, &bus_err, irq_status1);
1877 /* Mask out all other irqs if recovery is started */
1878 if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
1879 uint32_t halt_restart_mask0, halt_restart_mask1;
1880 vfe_dev->hw_info->vfe_ops.core_ops.
1881 get_halt_restart_mask(&halt_restart_mask0,
1882 &halt_restart_mask1);
1883 *irq_status0 &= halt_restart_mask0;
1884 *irq_status1 &= halt_restart_mask1;
1889 /* Check if any overflow bit is set */
1890 vfe_dev->hw_info->vfe_ops.core_ops.
1891 get_overflow_mask(&overflow_mask);
1892 overflow_mask &= *irq_status1;
1894 if (overflow_mask || force_overflow) {
1895 struct msm_isp_event_data error_event;
1897 struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
1900 &vfe_dev->common_data->common_dev_data_lock, flags);
/* cmpxchg claims recovery exactly once; bail if someone else already
 * moved the state off NO_OVERFLOW. */
1902 if (atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
1903 NO_OVERFLOW, OVERFLOW_DETECTED)) {
1904 spin_unlock_irqrestore(
1905 &vfe_dev->common_data->common_dev_data_lock,
1910 if (vfe_dev->reset_pending == 1) {
1911 pr_err_ratelimited("%s:%d overflow %x during reset\n",
1912 __func__, __LINE__, overflow_mask);
1913 /* Clear overflow bits since reset is pending */
1914 *irq_status1 &= ~overflow_mask;
1915 spin_unlock_irqrestore(
1916 &vfe_dev->common_data->common_dev_data_lock,
1920 pr_err_ratelimited("%s: vfe %d overflowmask %x,bus_error %x\n",
1921 __func__, vfe_dev->pdev->id, overflow_mask, bus_err);
/* Log which write masters were active for post-mortem. */
1922 for (i = 0; i < axi_data->hw_info->num_wm; i++) {
1923 if (!axi_data->free_wm[i])
1925 ISP_DBG("%s:wm %d assigned to stream handle %x\n",
1926 __func__, i, axi_data->free_wm[i]);
/* Save current IRQ masks so they can be restored after recovery,
 * then narrow to halt/restart bits and halt AXI. */
1928 vfe_dev->recovery_irq0_mask = vfe_dev->irq0_mask;
1929 vfe_dev->recovery_irq1_mask = vfe_dev->irq1_mask;
1930 vfe_dev->hw_info->vfe_ops.core_ops.
1931 set_halt_restart_mask(vfe_dev);
1932 vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0);
1933 /* mask off other vfe if dual vfe is used */
1934 if (vfe_dev->is_split) {
1936 struct vfe_device *temp_vfe;
1938 other_vfe_id = (vfe_dev->pdev->id == ISP_VFE0) ?
1939 ISP_VFE1 : ISP_VFE0;
1940 temp_vfe = vfe_dev->common_data->
1941 dual_vfe_res->vfe_dev[other_vfe_id];
1943 atomic_set(&temp_vfe->error_info.overflow_state,
1945 temp_vfe->recovery_irq0_mask = temp_vfe->irq0_mask;
1946 temp_vfe->recovery_irq1_mask = temp_vfe->irq1_mask;
1947 temp_vfe->hw_info->vfe_ops.core_ops.
1948 set_halt_restart_mask(temp_vfe);
1949 temp_vfe->hw_info->vfe_ops.axi_ops.halt(temp_vfe, 0);
1952 /* reset irq status so skip further process */
/* Notify userspace unless recovery already progressed past detect. */
1956 if (atomic_read(&vfe_dev->error_info.overflow_state)
1958 memset(&error_event, 0, sizeof(error_event));
1959 error_event.frame_id =
1960 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
1961 error_event.u.error_info.err_type =
1962 ISP_ERROR_BUS_OVERFLOW;
1963 msm_isp_send_event(vfe_dev,
1964 ISP_EVENT_ERROR, &error_event);
1966 spin_unlock_irqrestore(
1967 &vfe_dev->common_data->common_dev_data_lock,
/*
 * msm_isp_reset_burst_count_and_frame_drop - re-arm the frame-drop
 * pattern for an ACTIVE burst stream; no-op for continuous streams,
 * inactive streams, or bursts with zero remaining captures.
 */
1974 void msm_isp_reset_burst_count_and_frame_drop(
1975 struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
1977 if (stream_info->state != ACTIVE ||
1978 stream_info->stream_type != BURST_STREAM) {
1981 if (stream_info->num_burst_capture != 0)
1982 msm_isp_reset_framedrop(vfe_dev, stream_info)
/*
 * msm_isp_prepare_irq_debug_info - record one entry in the circular
 * IRQ debug dump (vfe id, cpu, timestamp, irq status words, ping/pong
 * status). In dual-VFE mode also snapshots the peer VFE's status so
 * both halves of the same moment are captured together.
 */
1985 void msm_isp_prepare_irq_debug_info(struct vfe_device *vfe_dev,
1986 uint32_t irq_status0, uint32_t irq_status1)
1989 unsigned long flags;
1990 struct msm_vfe_irq_debug_info *irq_debug;
1991 uint8_t current_index;
1993 spin_lock_irqsave(&vfe_dev->common_data->vfe_irq_dump.
1994 common_dev_irq_dump_lock, flags);
1995 /* Fill current VFE debug info */
/* Circular buffer: index wraps at MAX_VFE_IRQ_DEBUG_DUMP_SIZE. */
1996 current_index = vfe_dev->common_data->vfe_irq_dump.
1997 current_irq_index % MAX_VFE_IRQ_DEBUG_DUMP_SIZE;
1998 irq_debug = &vfe_dev->common_data->vfe_irq_dump.
1999 irq_debug[current_index];
2000 irq_debug->vfe_id = vfe_dev->pdev->id;
2001 irq_debug->core_id = smp_processor_id();
2002 msm_isp_get_timestamp(&irq_debug->ts, vfe_dev);
2003 irq_debug->irq_status0[vfe_dev->pdev->id] = irq_status0;
2004 irq_debug->irq_status1[vfe_dev->pdev->id] = irq_status1;
2005 irq_debug->ping_pong_status[vfe_dev->pdev->id] =
2006 vfe_dev->hw_info->vfe_ops.axi_ops.
2007 get_pingpong_status(vfe_dev);
/* Peer VFE (index !pdev->id assumes exactly two VFEs) — only when it
 * exists and is open. */
2008 if (vfe_dev->is_split &&
2009 (vfe_dev->common_data->
2010 dual_vfe_res->vfe_dev[!vfe_dev->pdev->id])
2011 && (vfe_dev->common_data->dual_vfe_res->
2012 vfe_dev[!vfe_dev->pdev->id]->vfe_open_cnt)) {
2013 /* Fill other VFE debug Info */
2014 vfe_dev->hw_info->vfe_ops.irq_ops.read_irq_status(
2015 vfe_dev->common_data->dual_vfe_res->
2016 vfe_dev[!vfe_dev->pdev->id],
2017 &irq_debug->irq_status0[!vfe_dev->pdev->id],
2018 &irq_debug->irq_status1[!vfe_dev->pdev->id]);
2019 irq_debug->ping_pong_status[!vfe_dev->pdev->id] =
2020 vfe_dev->hw_info->vfe_ops.axi_ops.
2021 get_pingpong_status(vfe_dev->common_data->
2022 dual_vfe_res->vfe_dev[!vfe_dev->pdev->id]);
2024 vfe_dev->common_data->vfe_irq_dump.current_irq_index++;
2025 spin_unlock_irqrestore(&vfe_dev->common_data->vfe_irq_dump.
2026 common_dev_irq_dump_lock, flags);
/*
 * msm_isp_prepare_tasklet_debug_info - tasklet-side counterpart of
 * msm_isp_prepare_irq_debug_info: records the status words as seen at
 * tasklet time into a separate circular dump, under its own lock.
 * @ts is accepted by value but not stored in the lines visible here.
 */
2029 void msm_isp_prepare_tasklet_debug_info(struct vfe_device *vfe_dev,
2030 uint32_t irq_status0, uint32_t irq_status1,
2031 struct msm_isp_timestamp ts)
2033 struct msm_vfe_irq_debug_info *irq_debug;
2034 uint8_t current_index;
2035 unsigned long flags;
2037 spin_lock_irqsave(&vfe_dev->common_data->vfe_irq_dump.
2038 common_dev_tasklet_dump_lock, flags);
2039 current_index = vfe_dev->common_data->vfe_irq_dump.
2040 current_tasklet_index % MAX_VFE_IRQ_DEBUG_DUMP_SIZE;
2041 irq_debug = &vfe_dev->common_data->vfe_irq_dump.
2042 tasklet_debug[current_index];
2043 irq_debug->vfe_id = vfe_dev->pdev->id;
2044 irq_debug->core_id = smp_processor_id();
2046 irq_debug->irq_status0[vfe_dev->pdev->id] = irq_status0;
2047 irq_debug->irq_status1[vfe_dev->pdev->id] = irq_status1;
2048 irq_debug->ping_pong_status[vfe_dev->pdev->id] =
2049 vfe_dev->hw_info->vfe_ops.axi_ops.
2050 get_pingpong_status(vfe_dev);
2051 vfe_dev->common_data->vfe_irq_dump.current_tasklet_index++;
2052 spin_unlock_irqrestore(&vfe_dev->common_data->vfe_irq_dump.
2053 common_dev_tasklet_dump_lock, flags);
/*
 * msm_isp_enqueue_tasklet_cmd - hand the just-read IRQ status words
 * from hard-IRQ context to the bottom half.
 *
 * Uses a fixed-size ring of queue_cmd slots; if the next slot is still
 * in use the event is dropped (logged rate-limited). Split (dual-VFE)
 * devices share the tasklet at index MAX_VFE so both halves are
 * processed serially.
 */
2056 static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
2057 uint32_t irq_status0, uint32_t irq_status1)
2059 unsigned long flags;
2060 struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;
2061 struct msm_vfe_tasklet *tasklet;
2063 if (vfe_dev->is_split)
2064 tasklet = &vfe_dev->common_data->tasklets[MAX_VFE];
2066 tasklet = &vfe_dev->common_data->tasklets[vfe_dev->pdev->id];
2068 spin_lock_irqsave(&tasklet->tasklet_lock, flags);
2069 queue_cmd = &tasklet->tasklet_queue_cmd[tasklet->taskletq_idx];
/* Slot still pending in the tasklet: ring is full, drop this IRQ. */
2070 if (queue_cmd->cmd_used) {
2071 pr_err_ratelimited("%s: Tasklet queue overflow: %d\n",
2072 __func__, vfe_dev->pdev->id);
2073 spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
2076 atomic_add(1, &vfe_dev->irq_cnt);
2078 queue_cmd->vfeInterruptStatus0 = irq_status0;
2079 queue_cmd->vfeInterruptStatus1 = irq_status1;
/* Timestamp at enqueue time so the bottom half sees IRQ-time ts. */
2080 msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
2082 queue_cmd->cmd_used = 1;
2083 queue_cmd->vfe_dev = vfe_dev;
2085 tasklet->taskletq_idx = (tasklet->taskletq_idx + 1) %
2086 MSM_VFE_TASKLETQ_SIZE;
2087 list_add_tail(&queue_cmd->list, &tasklet->tasklet_q);
2088 spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
2089 tasklet_schedule(&tasklet->tasklet);
/*
 * msm_isp_process_irq - top-half VFE interrupt handler.
 *
 * Reads and clears both status registers, lets the overflow detector
 * claim the IRQ if a bus overflow is starting, separates error bits
 * from normal bits (errors are latched via msm_isp_update_error_info),
 * then defers remaining work to the tasklet.
 */
2092 irqreturn_t msm_isp_process_irq(int irq_num, void *data)
2094 struct vfe_device *vfe_dev = (struct vfe_device *) data;
2095 uint32_t irq_status0, irq_status1;
2096 uint32_t error_mask0, error_mask1;
2098 vfe_dev->hw_info->vfe_ops.irq_ops.
2099 read_and_clear_irq_status(vfe_dev, &irq_status0, &irq_status1);
/* Spurious interrupt: nothing pending. */
2101 if ((irq_status0 == 0) && (irq_status1 == 0)) {
2102 ISP_DBG("%s:VFE%d irq_status0 & 1 are both 0\n",
2103 __func__, vfe_dev->pdev->id);
2106 if (vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq) {
2107 vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq(
2108 vfe_dev, irq_status0);
2110 if (msm_isp_process_overflow_irq(vfe_dev,
2111 &irq_status0, &irq_status1, 0)) {
2112 /* if overflow initiated no need to handle the interrupts */
2113 pr_err("overflow processed\n");
/* Split status into error bits (latched) and normal bits (queued). */
2117 vfe_dev->hw_info->vfe_ops.core_ops.
2118 get_error_mask(&error_mask0, &error_mask1);
2119 error_mask0 &= irq_status0;
2120 error_mask1 &= irq_status1;
2121 irq_status0 &= ~error_mask0;
2122 irq_status1 &= ~error_mask1;
2123 if ((error_mask0 != 0) || (error_mask1 != 0))
2124 msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);
/* Only schedule the tasklet if there is normal work, or this is the
 * first error of a burst (so it gets reported promptly). */
2126 if ((irq_status0 == 0) && (irq_status1 == 0) &&
2127 (!(((error_mask0 != 0) || (error_mask1 != 0)) &&
2128 vfe_dev->error_info.error_count == 1))) {
2129 ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
2132 msm_isp_prepare_irq_debug_info(vfe_dev, irq_status0, irq_status1);
2133 msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1);
/*
 * msm_isp_do_tasklet - bottom half: drain the tasklet queue and run
 * every IRQ sub-handler (reset, halt, stats, axi, camif, reg-update,
 * epoch) for each queued status pair.
 *
 * Slot fields are cleared under the tasklet lock before releasing it,
 * so the hard-IRQ side can immediately reuse the slot. While overflow
 * recovery is in progress, only reset/halt IRQs are serviced.
 */
2138 void msm_isp_do_tasklet(unsigned long data)
2140 unsigned long flags;
2141 struct msm_vfe_tasklet *tasklet = (struct msm_vfe_tasklet *)data;
2142 struct vfe_device *vfe_dev;
2143 struct msm_vfe_irq_ops *irq_ops;
2144 struct msm_vfe_tasklet_queue_cmd *queue_cmd;
2145 struct msm_isp_timestamp ts;
2146 uint32_t irq_status0, irq_status1;
2149 spin_lock_irqsave(&tasklet->tasklet_lock, flags);
2150 queue_cmd = list_first_entry_or_null(&tasklet->tasklet_q,
2151 struct msm_vfe_tasklet_queue_cmd, list);
/* Empty queue: drop the lock and stop. */
2153 spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
2156 list_del_init(&queue_cmd->list);
/* Copy out and free the slot while still holding the lock. */
2157 vfe_dev = queue_cmd->vfe_dev;
2158 queue_cmd->cmd_used = 0;
2159 queue_cmd->vfe_dev = NULL;
2160 irq_status0 = queue_cmd->vfeInterruptStatus0;
2161 irq_status1 = queue_cmd->vfeInterruptStatus1;
2163 spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
/* Device was closed after the IRQ was queued: discard the event. */
2164 if (vfe_dev->vfe_open_cnt == 0) {
2165 pr_err("%s: VFE%d open cnt = %d, irq %x/%x\n",
2166 __func__, vfe_dev->pdev->id, vfe_dev->vfe_open_cnt,
2167 irq_status0, irq_status1);
2170 atomic_sub(1, &vfe_dev->irq_cnt);
2171 msm_isp_prepare_tasklet_debug_info(vfe_dev,
2172 irq_status0, irq_status1, ts);
2173 irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
2174 irq_ops->process_reset_irq(vfe_dev,
2175 irq_status0, irq_status1);
2176 irq_ops->process_halt_irq(vfe_dev,
2177 irq_status0, irq_status1);
/* During overflow recovery only reset/halt above are allowed. */
2178 if (atomic_read(&vfe_dev->error_info.overflow_state)
2180 ISP_DBG("%s: Recovery in processing, Ignore IRQs!!!\n",
2184 msm_isp_process_error_info(vfe_dev);
2185 irq_ops->process_stats_irq(vfe_dev,
2186 irq_status0, irq_status1, &ts);
2187 irq_ops->process_axi_irq(vfe_dev,
2188 irq_status0, irq_status1, &ts);
2189 irq_ops->process_camif_irq(vfe_dev,
2190 irq_status0, irq_status1, &ts);
2191 irq_ops->process_reg_update(vfe_dev,
2192 irq_status0, irq_status1, &ts);
2193 irq_ops->process_epoch_irq(vfe_dev,
2194 irq_status0, irq_status1, &ts);
/*
 * msm_isp_set_src_state - ioctl backend: set active flag and frame id
 * for one input source, after range-checking the source index against
 * VFE_SRC_MAX.
 */
2198 int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
2200 struct msm_vfe_axi_src_state *src_state = arg;
2201 if (src_state->input_src >= VFE_SRC_MAX)
2203 vfe_dev->axi_data.src_info[src_state->input_src].active =
2204 src_state->src_active;
2205 vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
2206 src_state->src_frame_id;
/*
 * msm_vfe_iommu_fault_handler - SMMU fault callback registered via
 * cam_smmu_reg_client_page_fault_handler() in msm_isp_open_node().
 *
 * @token is the vfe_device. Stores the faulting @iova, then (under
 * core_mutex, serializing against open/close) runs the page-fault
 * recovery if the device is open and has active streams.
 */
2210 static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
2211 struct device *dev, unsigned long iova, int flags, void *token)
2213 struct vfe_device *vfe_dev = NULL;
2216 vfe_dev = (struct vfe_device *)token;
2217 vfe_dev->page_fault_addr = iova;
/* Nothing to recover if buf mgr is gone or no stream is running. */
2218 if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops ||
2219 !vfe_dev->axi_data.num_active_stream) {
2220 pr_err("%s:%d buf_mgr %pK active strms %d\n", __func__,
2221 __LINE__, vfe_dev->buf_mgr,
2222 vfe_dev->axi_data.num_active_stream);
2226 mutex_lock(&vfe_dev->core_mutex);
2227 if (vfe_dev->vfe_open_cnt > 0) {
2228 pr_err_ratelimited("%s: fault address is %lx\n",
2230 msm_isp_process_iommu_page_fault(vfe_dev);
2232 pr_err("%s: no handling, vfe open cnt = %d\n",
2233 __func__, vfe_dev->vfe_open_cnt);
2235 mutex_unlock(&vfe_dev->core_mutex);
/* NULL-token branch (elided condition above). */
2237 ISP_DBG("%s:%d] no token received: %pK\n",
2238 __func__, __LINE__, token);
/*
 * msm_isp_open_node - v4l2 subdev open: first opener brings up the VFE
 * (init hw, reset, init registers, buffer manager, zeroed shared
 * state) and registers the SMMU page-fault handler; subsequent opens
 * just bump the refcount. Serialized by realtime_mutex + core_mutex
 * (always taken in that order).
 */
2245 int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
2247 struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
2250 ISP_DBG("%s open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
2252 if (vfe_dev->common_data == NULL ||
2253 vfe_dev->common_data->dual_vfe_res == NULL) {
2254 pr_err("%s: Error in probe. No common_data or dual vfe res\n",
2259 mutex_lock(&vfe_dev->realtime_mutex);
2260 mutex_lock(&vfe_dev->core_mutex);
/* Already open: only the refcount changes. */
2262 if (vfe_dev->vfe_open_cnt++) {
2263 mutex_unlock(&vfe_dev->core_mutex);
2264 mutex_unlock(&vfe_dev->realtime_mutex);
2268 vfe_dev->reset_pending = 0;
2269 vfe_dev->isp_sof_debug = 0;
2270 vfe_dev->isp_raw0_debug = 0;
2271 vfe_dev->isp_raw1_debug = 0;
2272 vfe_dev->isp_raw2_debug = 0;
/* init_hw failure: undo the refcount before unwinding. */
2274 if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
2275 pr_err("%s: init hardware failed\n", __func__);
2276 vfe_dev->vfe_open_cnt--;
2277 mutex_unlock(&vfe_dev->core_mutex);
2278 mutex_unlock(&vfe_dev->realtime_mutex);
2282 memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
2283 atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
2285 vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);
2287 vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
2288 ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
/* reset_hw(vfe_dev, 1, 1): blocking reset; on timeout release hw and
 * roll back the open. */
2289 rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
2291 pr_err("%s: reset timeout\n", __func__);
2292 vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
2293 vfe_dev->vfe_open_cnt--;
2294 mutex_unlock(&vfe_dev->core_mutex);
2295 mutex_unlock(&vfe_dev->realtime_mutex);
2299 vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
2301 vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr,
/* Fresh per-open state: axi, stats, errors, fetch engine. */
2304 memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
2305 memset(&vfe_dev->stats_data, 0,
2306 sizeof(struct msm_vfe_stats_shared_data));
2307 memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
2308 memset(&vfe_dev->fetch_engine_info, 0,
2309 sizeof(vfe_dev->fetch_engine_info));
2310 vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
2311 vfe_dev->axi_data.enable_frameid_recovery = 0;
2312 vfe_dev->vt_enable = 0;
2313 vfe_dev->reg_update_requested = 0;
2314 /* Register page fault handler */
2315 vfe_dev->buf_mgr->pagefault_debug_disable = 0;
2316 /* initialize pd_buf_idx with an invalid index 0xF */
2317 vfe_dev->common_data->pd_buf_idx = 0xF;
2319 cam_smmu_reg_client_page_fault_handler(
2320 vfe_dev->buf_mgr->iommu_hdl,
2321 msm_vfe_iommu_fault_handler,
2324 mutex_unlock(&vfe_dev->core_mutex);
2325 mutex_unlock(&vfe_dev->realtime_mutex);
/*
 * Stop AV-timer usage when video-timestamp mode is disabled at close.
 * With CONFIG_MSM_AVTIMER the audio DSP power-collapse vote is released;
 * otherwise the stub only logs that the AV timer is unsupported.
 * NOTE(review): the #else/#endif lines between the two definitions are
 * elided in this view.
 */
2329 #ifdef CONFIG_MSM_AVTIMER
2330 void msm_isp_end_avtimer(void)
2332 avcs_core_disable_power_collapse(0);
2335 void msm_isp_end_avtimer(void)
2337 pr_err("AV Timer is not supported\n");
/*
 * v4l2 subdev close handler for a VFE node.
 *
 * Non-last closers just decrement vfe_open_cnt. The last closer tears the
 * session down: releases all AXI/stats streams, unregisters the IOMMU
 * fault handler, halts and resets the hardware, parks every write master
 * on scratch buffers, releases the hardware and de-inits the buffer
 * manager. Lock order (realtime_mutex -> core_mutex) matches
 * msm_isp_open_node(); the dual-VFE mutex additionally serializes with
 * the peer VFE during split-sensor teardown.
 * NOTE(review): declarations of rc/i/wm and the return statements are
 * elided in this view.
 */
2341 int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
2346 struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
2347 ISP_DBG("%s E open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
2348 mutex_lock(&vfe_dev->realtime_mutex);
2349 mutex_lock(&vfe_dev->core_mutex);
/* Close without a matching open is a caller bug — reject it. */
2351 if (!vfe_dev->vfe_open_cnt) {
2352 pr_err("%s invalid state open cnt %d\n", __func__,
2353 vfe_dev->vfe_open_cnt);
2354 mutex_unlock(&vfe_dev->core_mutex);
2355 mutex_unlock(&vfe_dev->realtime_mutex);
/* Not the last reference: only drop the count. */
2359 if (vfe_dev->vfe_open_cnt > 1) {
2360 vfe_dev->vfe_open_cnt--;
2361 mutex_unlock(&vfe_dev->core_mutex);
2362 mutex_unlock(&vfe_dev->realtime_mutex);
/* Last close: keep the peer VFE quiesced while we tear down. */
2365 MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
2366 msm_isp_release_all_axi_stream(vfe_dev);
2367 msm_isp_release_all_stats_stream(vfe_dev);
2369 /* Unregister page fault handler */
2370 cam_smmu_reg_client_page_fault_handler(
2371 vfe_dev->buf_mgr->iommu_hdl,
2372 NULL, NULL, vfe_dev);
/* Blocking halt of the AXI bridge before touching CAMIF/reset. */
2374 rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
2376 pr_err("%s: halt timeout rc=%ld\n", __func__, rc);
2378 vfe_dev->hw_info->vfe_ops.core_ops.
2379 update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
2380 vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 0);
2382 /* put scratch buf in all the wm */
2383 for (wm = 0; wm < vfe_dev->axi_data.hw_info->num_wm; wm++) {
2384 msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PING_FLAG);
2385 msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PONG_FLAG);
2387 vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
2388 /* after regular hw stop, reduce open cnt */
2389 vfe_dev->vfe_open_cnt--;
2390 vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
/* Drop the AV-timer vote if video-timestamping was on this session. */
2391 if (vfe_dev->vt_enable) {
2392 msm_isp_end_avtimer();
2393 vfe_dev->vt_enable = 0;
/* Clear low-power-mode flags for every input source. */
2395 for (i = 0; i < VFE_SRC_MAX; i++)
2396 vfe_dev->axi_data.src_info[i].lpm = 0;
2397 MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
2398 vfe_dev->is_split = 0;
2400 mutex_unlock(&vfe_dev->core_mutex);
2401 mutex_unlock(&vfe_dev->realtime_mutex);
/*
 * Drain and kill every tasklet queue that may still hold deferred IRQ
 * work for this vfe_dev, then zero its pending-IRQ counter. Queue entries
 * belonging to other VFE devices are left in place.
 * NOTE(review): the loop bound uses '<= MAX_VFE' — presumably the
 * tasklets[] array is sized MAX_VFE + 1 (per-VFE slots plus a shared
 * slot); confirm against the array declaration before treating this as
 * an off-by-one.
 */
2405 void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
2407 unsigned long flags;
2409 struct msm_vfe_tasklet_queue_cmd *queue_cmd, *q_cmd_next;
2410 struct msm_vfe_tasklet *tasklet;
2412 for (i = 0; i <= MAX_VFE; i++) {
/* Skip slots for other VFE ids (condition partially elided here). */
2413 if (i != vfe_dev->pdev->id &&
2416 tasklet = &vfe_dev->common_data->tasklets[i];
/* Lock out the tasklet/IRQ producer while unlinking our entries. */
2417 spin_lock_irqsave(&tasklet->tasklet_lock, flags);
2418 list_for_each_entry_safe(queue_cmd, q_cmd_next,
2419 &tasklet->tasklet_q, list) {
2420 if (queue_cmd->vfe_dev != vfe_dev)
/* Return the command slot to the free pool. */
2422 list_del_init(&queue_cmd->list);
2423 queue_cmd->cmd_used = 0;
2425 spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
/* Wait for any in-flight run of this tasklet to finish. */
2426 tasklet_kill(&tasklet->tasklet);
2428 atomic_set(&vfe_dev->irq_cnt, 0);
/*
 * Emit the circular IRQ debug-history buffer to the trace log, oldest
 * entry first, starting from the current write index. The dump lock
 * keeps the history stable while we walk all
 * MAX_VFE_IRQ_DEBUG_DUMP_SIZE slots.
 * NOTE(review): the per-iteration dump_index increment is elided in this
 * view — confirm against the full file.
 */
2433 void msm_isp_irq_debug_dump(struct vfe_device *vfe_dev)
2436 uint8_t i, dump_index;
2437 unsigned long flags;
2439 spin_lock_irqsave(&vfe_dev->common_data->vfe_irq_dump.
2440 common_dev_irq_dump_lock, flags);
2441 dump_index = vfe_dev->common_data->vfe_irq_dump.
2443 for (i = 0; i < MAX_VFE_IRQ_DEBUG_DUMP_SIZE; i++) {
2444 trace_msm_cam_ping_pong_debug_dump(
2445 vfe_dev->common_data->vfe_irq_dump.
/* Modulo keeps the walk inside the ring buffer. */
2446 irq_debug[dump_index % MAX_VFE_IRQ_DEBUG_DUMP_SIZE]);
2449 spin_unlock_irqrestore(&vfe_dev->common_data->vfe_irq_dump.
2450 common_dev_irq_dump_lock, flags);
/*
 * Counterpart of msm_isp_irq_debug_dump() for the tasklet-side history:
 * trace every slot of the circular tasklet debug buffer under its own
 * dump lock, beginning at the current write index.
 * NOTE(review): the per-iteration dump_index increment is elided in this
 * view — confirm against the full file.
 */
2454 void msm_isp_tasklet_debug_dump(struct vfe_device *vfe_dev)
2457 uint8_t i, dump_index;
2458 unsigned long flags;
2460 spin_lock_irqsave(&vfe_dev->common_data->vfe_irq_dump.
2461 common_dev_tasklet_dump_lock, flags);
2462 dump_index = vfe_dev->common_data->vfe_irq_dump.
2463 current_tasklet_index;
2464 for (i = 0; i < MAX_VFE_IRQ_DEBUG_DUMP_SIZE; i++) {
2465 trace_msm_cam_tasklet_debug_dump(
2466 vfe_dev->common_data->vfe_irq_dump.
/* Modulo keeps the walk inside the ring buffer. */
2468 dump_index % MAX_VFE_IRQ_DEBUG_DUMP_SIZE]);
2471 spin_unlock_irqrestore(&vfe_dev->common_data->vfe_irq_dump.
2472 common_dev_tasklet_dump_lock, flags);
/*
 * Convenience wrapper used on ping/pong buffer mismatch: emit banner
 * strings into the trace log and dump both the IRQ and tasklet debug
 * histories for this VFE. (Function tail extends past this view.)
 */
2475 void msm_isp_dump_ping_pong_mismatch(struct vfe_device *vfe_dev)
2478 trace_msm_cam_string(" ***** msm_isp_dump_irq_debug ****");
2479 msm_isp_irq_debug_dump(vfe_dev);
2480 trace_msm_cam_string(" ***** msm_isp_dump_taskelet_debug ****");
2481 msm_isp_tasklet_debug_dump(vfe_dev);