OSDN Git Service

Merge "msm: camera: cpp: Issue CPP HALT on page fault"
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / media / platform / msm / camera_v2 / isp / msm_isp_util.c
1 /* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12 #include <linux/mutex.h>
13 #include <linux/io.h>
14 #include <media/v4l2-subdev.h>
15 #include <linux/ratelimit.h>
16
17 #include "msm.h"
18 #include "msm_isp_util.h"
19 #include "msm_isp_axi_util.h"
20 #include "msm_isp_stats_util.h"
21 #include "msm_camera_io_util.h"
22 #include "cam_smmu_api.h"
23 #include "msm_isp48.h"
24 #define CREATE_TRACE_POINTS
25 #include "trace/events/msm_cam.h"
26
27
28 #define MAX_ISP_V4l2_EVENTS 100
29 #define MAX_ISP_REG_LIST 100
30 static DEFINE_MUTEX(bandwidth_mgr_mutex);
31 static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
32
/*
 * In split (dual-VFE) mode, operations driven from VFE0 must also hold
 * VFE1's core_mutex so both halves of the pipeline are updated atomically.
 * The macro is a no-op on VFE1 or when the device is not split, so only
 * the VFE0 path ever takes the peer lock (avoids lock-order inversion).
 */
#define MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev) { \
	if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) { \
		struct vfe_device *vfe1_dev = vfe_dev->common_data-> \
					dual_vfe_res->vfe_dev[ISP_VFE1]; \
		mutex_lock(&vfe1_dev->core_mutex); \
	} \
}

/* Counterpart of MSM_ISP_DUAL_VFE_MUTEX_LOCK; same no-op conditions. */
#define MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev) { \
	if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) { \
		struct vfe_device *vfe1_dev = vfe_dev->common_data-> \
					dual_vfe_res->vfe_dev[ISP_VFE1]; \
		mutex_unlock(&vfe1_dev->core_mutex); \
	} \
}
48
49 static uint64_t msm_isp_cpp_clk_rate;
50
51 #define VFE40_8974V2_VERSION 0x1001001A
52
53 void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
54 {
55         int i;
56         char text[5];
57         text[4] = '\0';
58         for (i = 0; i < 4; i++) {
59                 text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
60                 if ((text[i] < '0') || (text[i] > 'z')) {
61                         pr_err("%s: Invalid output format %d (unprintable)\n",
62                                 origin, fourcc_format);
63                         return;
64                 }
65         }
66         pr_err("%s: Invalid output format %s\n",
67                 origin, text);
68         return;
69 }
70
71 int msm_isp_init_bandwidth_mgr(struct vfe_device *vfe_dev,
72                         enum msm_isp_hw_client client)
73 {
74         int rc = 0;
75
76         mutex_lock(&bandwidth_mgr_mutex);
77         if (isp_bandwidth_mgr.client_info[client].active) {
78                 mutex_unlock(&bandwidth_mgr_mutex);
79                 return rc;
80         }
81         isp_bandwidth_mgr.client_info[client].active = 1;
82         isp_bandwidth_mgr.use_count++;
83         if (vfe_dev && !isp_bandwidth_mgr.bus_client) {
84                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.init_bw_mgr(vfe_dev,
85                                 &isp_bandwidth_mgr);
86                 if (!rc) {
87                         isp_bandwidth_mgr.update_bw =
88                         vfe_dev->hw_info->vfe_ops.platform_ops.update_bw;
89                         isp_bandwidth_mgr.deinit_bw_mgr =
90                         vfe_dev->hw_info->vfe_ops.platform_ops.deinit_bw_mgr;
91                 }
92         }
93         if (rc) {
94                 isp_bandwidth_mgr.use_count--;
95                 isp_bandwidth_mgr.client_info[client].active = 0;
96         }
97
98         mutex_unlock(&bandwidth_mgr_mutex);
99         return rc;
100 }
101
102 int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
103         uint64_t ab, uint64_t ib)
104 {
105         int rc;
106
107         mutex_lock(&bandwidth_mgr_mutex);
108         if (!isp_bandwidth_mgr.use_count ||
109                 !isp_bandwidth_mgr.bus_client) {
110                 pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
111                         __func__, isp_bandwidth_mgr.use_count,
112                         isp_bandwidth_mgr.bus_client);
113                 mutex_unlock(&bandwidth_mgr_mutex);
114                 return -EINVAL;
115         }
116
117         isp_bandwidth_mgr.client_info[client].ab = ab;
118         isp_bandwidth_mgr.client_info[client].ib = ib;
119         rc = isp_bandwidth_mgr.update_bw(&isp_bandwidth_mgr);
120         mutex_unlock(&bandwidth_mgr_mutex);
121         return 0;
122 }
123
124 void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
125 {
126         if (client >= MAX_ISP_CLIENT) {
127                 pr_err("invalid Client id %d", client);
128                 return;
129         }
130         mutex_lock(&bandwidth_mgr_mutex);
131         memset(&isp_bandwidth_mgr.client_info[client], 0,
132                         sizeof(struct msm_isp_bandwidth_info));
133         if (isp_bandwidth_mgr.use_count) {
134                 isp_bandwidth_mgr.use_count--;
135                 if (isp_bandwidth_mgr.use_count) {
136                         mutex_unlock(&bandwidth_mgr_mutex);
137                         return;
138                 }
139
140                 if (!isp_bandwidth_mgr.bus_client) {
141                         pr_err("%s:%d error: bus client invalid\n",
142                                 __func__, __LINE__);
143                         mutex_unlock(&bandwidth_mgr_mutex);
144                         return;
145                 }
146
147                 isp_bandwidth_mgr.deinit_bw_mgr(
148                                 &isp_bandwidth_mgr);
149         }
150         mutex_unlock(&bandwidth_mgr_mutex);
151 }
152
153 void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
154                                       struct msm_isp_statistics *stats)
155 {
156         stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
157         stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
158         stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
159
160         stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
161         stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
162         stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
163
164         stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
165         stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
166         stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
167         stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
168         stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
169         stats->vfe_clk_rate = vfe_dev->vfe_clk_info[
170                                 vfe_dev->hw_info->vfe_clk_idx].clk_rate;
171         stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
172 }
173
/*
 * msm_isp_util_update_clk_rate - Cache the current CPP clock rate so it can
 * be reported through msm_isp_util_get_bandwidth_stats().
 * @clock_rate: new CPP clock rate in Hz (stored widened to uint64_t).
 */
void msm_isp_util_update_clk_rate(long clock_rate)
{
	msm_isp_cpp_clk_rate = clock_rate;
}
178
179 uint32_t msm_isp_get_framedrop_period(
180         enum msm_vfe_frame_skip_pattern frame_skip_pattern)
181 {
182         switch (frame_skip_pattern) {
183         case NO_SKIP:
184         case EVERY_2FRAME:
185         case EVERY_3FRAME:
186         case EVERY_4FRAME:
187         case EVERY_5FRAME:
188         case EVERY_6FRAME:
189         case EVERY_7FRAME:
190         case EVERY_8FRAME:
191                 return frame_skip_pattern + 1;
192         case EVERY_16FRAME:
193                 return 16;
194                 break;
195         case EVERY_32FRAME:
196                 return 32;
197                 break;
198         case SKIP_ALL:
199                 return 1;
200         default:
201                 return 1;
202         }
203         return 1;
204 }
205
206 void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
207         struct vfe_device *vfe_dev)
208 {
209         struct timespec ts;
210
211         do_gettimeofday(&(time_stamp->event_time));
212         if (vfe_dev->vt_enable) {
213                 msm_isp_get_avtimer_ts(time_stamp);
214                 time_stamp->buf_time.tv_sec    = time_stamp->vt_time.tv_sec;
215                 time_stamp->buf_time.tv_usec   = time_stamp->vt_time.tv_usec;
216         } else {
217                 get_monotonic_boottime(&ts);
218                 time_stamp->buf_time.tv_sec    = ts.tv_sec;
219                 time_stamp->buf_time.tv_usec   = ts.tv_nsec/1000;
220         }
221 }
222
223 static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
224 {
225         u32 evt_id = ISP_EVENT_SUBS_MASK_NONE;
226
227         switch (evt_mask) {
228         case ISP_EVENT_MASK_INDEX_STATS_NOTIFY:
229                 evt_id = ISP_EVENT_STATS_NOTIFY;
230                 break;
231         case ISP_EVENT_MASK_INDEX_ERROR:
232                 evt_id = ISP_EVENT_ERROR;
233                 break;
234         case ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT:
235                 evt_id = ISP_EVENT_IOMMU_P_FAULT;
236                 break;
237         case ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE:
238                 evt_id = ISP_EVENT_STREAM_UPDATE_DONE;
239                 break;
240         case ISP_EVENT_MASK_INDEX_REG_UPDATE:
241                 evt_id = ISP_EVENT_REG_UPDATE;
242                 break;
243         case ISP_EVENT_MASK_INDEX_SOF:
244                 evt_id = ISP_EVENT_SOF;
245                 break;
246         case ISP_EVENT_MASK_INDEX_BUF_DIVERT:
247                 evt_id = ISP_EVENT_BUF_DIVERT;
248                 break;
249         case ISP_EVENT_MASK_INDEX_BUF_DONE:
250                 evt_id = ISP_EVENT_BUF_DONE;
251                 break;
252         case ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY:
253                 evt_id = ISP_EVENT_COMP_STATS_NOTIFY;
254                 break;
255         case ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE:
256                 evt_id = ISP_EVENT_FE_READ_DONE;
257                 break;
258         case ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH:
259                 evt_id = ISP_EVENT_PING_PONG_MISMATCH;
260                 break;
261         case ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING:
262                 evt_id = ISP_EVENT_REG_UPDATE_MISSING;
263                 break;
264         case ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR:
265                 evt_id = ISP_EVENT_BUF_FATAL_ERROR;
266                 break;
267         default:
268                 evt_id = ISP_EVENT_SUBS_MASK_NONE;
269                 break;
270         }
271
272         return evt_id;
273 }
274
275 static inline int msm_isp_subscribe_event_mask(struct v4l2_fh *fh,
276                 struct v4l2_event_subscription *sub, int evt_mask_index,
277                 u32 evt_id, bool subscribe_flag)
278 {
279         int rc = 0, i, interface;
280
281         if (ISP_EVENT_MASK_INDEX_STATS_NOTIFY == evt_mask_index) {
282                 for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
283                         sub->type = evt_id + i;
284                         if (subscribe_flag)
285                                 rc = v4l2_event_subscribe(fh, sub,
286                                         MAX_ISP_V4l2_EVENTS, NULL);
287                         else
288                                 rc = v4l2_event_unsubscribe(fh, sub);
289                         if (rc != 0) {
290                                 pr_err("%s: Subs event_type =0x%x failed\n",
291                                         __func__, sub->type);
292                                 return rc;
293                         }
294                 }
295         } else if (ISP_EVENT_MASK_INDEX_SOF == evt_mask_index ||
296                    ISP_EVENT_MASK_INDEX_REG_UPDATE == evt_mask_index ||
297                    ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE == evt_mask_index) {
298                 for (interface = 0; interface < VFE_SRC_MAX; interface++) {
299                         sub->type = evt_id | interface;
300                         if (subscribe_flag)
301                                 rc = v4l2_event_subscribe(fh, sub,
302                                         MAX_ISP_V4l2_EVENTS, NULL);
303                         else
304                                 rc = v4l2_event_unsubscribe(fh, sub);
305                         if (rc != 0) {
306                                 pr_err("%s: Subs event_type =0x%x failed\n",
307                                         __func__, sub->type);
308                                 return rc;
309                         }
310                 }
311         } else {
312                 sub->type = evt_id;
313                 if (subscribe_flag)
314                         rc = v4l2_event_subscribe(fh, sub,
315                                 MAX_ISP_V4l2_EVENTS, NULL);
316                 else
317                         rc = v4l2_event_unsubscribe(fh, sub);
318                 if (rc != 0) {
319                         pr_err("%s: Subs event_type =0x%x failed\n",
320                                 __func__, sub->type);
321                         return rc;
322                 }
323         }
324         return rc;
325 }
326
327 static inline int msm_isp_process_event_subscription(struct v4l2_fh *fh,
328         struct v4l2_event_subscription *sub, bool subscribe_flag)
329 {
330         int rc = 0, evt_mask_index = 0;
331         u32 evt_mask = sub->type;
332         u32 evt_id = 0;
333
334         if (ISP_EVENT_SUBS_MASK_NONE == evt_mask) {
335                 pr_err("%s: Subs event_type is None=0x%x\n",
336                         __func__, evt_mask);
337                 return 0;
338         }
339
340         for (evt_mask_index = ISP_EVENT_MASK_INDEX_STATS_NOTIFY;
341                 evt_mask_index <= ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR;
342                 evt_mask_index++) {
343                 if (evt_mask & (1<<evt_mask_index)) {
344                         evt_id = msm_isp_evt_mask_to_isp_event(evt_mask_index);
345                         rc = msm_isp_subscribe_event_mask(fh, sub,
346                                 evt_mask_index, evt_id, subscribe_flag);
347                         if (rc != 0) {
348                                 pr_err("%s: Subs event index:%d failed\n",
349                                         __func__, evt_mask_index);
350                                 return rc;
351                         }
352                 }
353         }
354         return rc;
355 }
356
/*
 * msm_isp_subscribe_event - v4l2 subdev subscribe hook; delegates to the
 * mask-expanding subscription handler with subscribe_flag=true.
 */
int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
	struct v4l2_event_subscription *sub)
{
	return msm_isp_process_event_subscription(fh, sub, true);
}
362
/*
 * msm_isp_unsubscribe_event - v4l2 subdev unsubscribe hook; delegates to
 * the mask-expanding subscription handler with subscribe_flag=false.
 */
int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
	struct v4l2_event_subscription *sub)
{
	return msm_isp_process_event_subscription(fh, sub, false);
}
368
369 static int msm_isp_start_fetch_engine(struct vfe_device *vfe_dev,
370         void *arg)
371 {
372         struct msm_vfe_fetch_eng_start *fe_cfg = arg;
373         /*
374          * For Offline VFE, HAL expects same frame id
375          * for offline output which it requested in do_reprocess.
376          */
377         vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
378                 fe_cfg->frame_id;
379         return vfe_dev->hw_info->vfe_ops.core_ops.
380                 start_fetch_eng(vfe_dev, arg);
381 }
382
/*
 * msm_isp_start_fetch_engine_multi_pass - Start a multi-pass offline
 * fetch-engine run.
 * @vfe_dev: target VFE.
 * @arg: struct msm_vfe_fetch_eng_multi_pass_start from user space.
 *
 * On the second pass the output stream is re-armed first: framedrop state
 * is reset, the PING buffer address is programmed for the requested output
 * buffer, and the write masters are reloaded after a reg update. The order
 * reg_update -> reload_wm mirrors the hw commit sequence — do not reorder.
 * Returns a negative errno on config failure, else the result of
 * start_fetch_eng_multi_pass.
 */
static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
	void *arg)
{
	struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
	struct msm_vfe_axi_stream *stream_info = NULL;
	int i = 0, rc;
	uint32_t wm_reload_mask = 0;
	int vfe_idx;
	/*
	 * For Offline VFE, HAL expects same frame id
	 * for offline output which it requested in do_reprocess.
	 */
	vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
		fe_cfg->frame_id;
	if (fe_cfg->offline_pass == OFFLINE_SECOND_PASS) {
		stream_info = msm_isp_get_stream_common_data(vfe_dev,
			HANDLE_TO_IDX(fe_cfg->output_stream_id));
		if (stream_info == NULL) {
			pr_err("%s: Error in Offline process\n", __func__);
			return -EINVAL;
		}
		vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
		msm_isp_reset_framedrop(vfe_dev, stream_info);

		/* Point the output stream's PING buffer at the requested
		 * output buffer for this pass. */
		rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
			VFE_PING_FLAG, fe_cfg->output_buf_idx);
		if (rc < 0) {
			pr_err("%s: Fetch engine config failed\n", __func__);
			return -EINVAL;
		}
		/* Collect every write master used by this stream on this
		 * VFE so they can be reloaded together. */
		for (i = 0; i < stream_info->num_planes; i++)
			wm_reload_mask |= (1 << stream_info->wm[vfe_idx][i]);
		vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
			VFE_SRC_MAX);
		vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
			vfe_dev->vfe_base, wm_reload_mask);
	}
	return vfe_dev->hw_info->vfe_ops.core_ops.
		start_fetch_eng_multi_pass(vfe_dev, arg);
}
423
424 void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
425         struct msm_vfe_fetch_engine_info *fetch_engine_info)
426 {
427         struct msm_isp_event_data fe_rd_done_event;
428         memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data));
429         fe_rd_done_event.frame_id =
430                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
431         fe_rd_done_event.u.fetch_done.session_id =
432                 fetch_engine_info->session_id;
433         fe_rd_done_event.u.fetch_done.stream_id = fetch_engine_info->stream_id;
434         fe_rd_done_event.u.fetch_done.handle = fetch_engine_info->bufq_handle;
435         fe_rd_done_event.u.fetch_done.buf_idx = fetch_engine_info->buf_idx;
436         fe_rd_done_event.u.fetch_done.fd = fetch_engine_info->fd;
437         fe_rd_done_event.u.fetch_done.offline_mode =
438                 fetch_engine_info->offline_mode;
439
440         ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
441                 __func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
442         fetch_engine_info->is_busy = 0;
443         msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
444 }
445
/*
 * msm_isp_cfg_pix - Configure the VFE pixel (PIX) input path.
 * @vfe_dev: target VFE.
 * @input_cfg: user-supplied input configuration (pix_cfg branch).
 *
 * Rejects the request with -EINVAL while the pixel path is active.
 * Records clock/mux/format bookkeeping in src_info[VFE_PIX_0], derives the
 * input width from CAMIF line length or fetch-engine stride depending on
 * the mux, then programs the input mux and issues a reg update so the
 * settings commit at the next frame boundary.
 */
static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
	struct msm_vfe_input_cfg *input_cfg)
{
	int rc = 0;
	struct msm_vfe_pix_cfg *pix_cfg = NULL;

	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
		pr_err("%s: pixel path is active\n", __func__);
		return -EINVAL;
	}

	pix_cfg = &input_cfg->d.pix_cfg;
	vfe_dev->hvx_cmd = pix_cfg->hvx_cmd;
	vfe_dev->is_split = input_cfg->d.pix_cfg.is_split;

	vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
		input_cfg->input_pix_clk;
	vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
		input_cfg->d.pix_cfg.input_mux;
	vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
		input_cfg->d.pix_cfg.input_format;
	/* Default: count every SOF; may be overridden by CAMIF subsample. */
	vfe_dev->axi_data.src_info[VFE_PIX_0].sof_counter_step = 1;

	/*
	 * Fill pixel_clock into input_pix_clk so that user space
	 * can use rounded clk rate
	 */
	input_cfg->input_pix_clk =
		vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock;

	ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
		input_cfg->d.pix_cfg.input_mux, CAMIF,
		input_cfg->d.pix_cfg.input_format);

	if (input_cfg->d.pix_cfg.input_mux == CAMIF ||
		input_cfg->d.pix_cfg.input_mux == TESTGEN) {
		/* CAMIF/TESTGEN: width comes from the CAMIF line length;
		 * honor a positive subsample sof_counter_step override. */
		vfe_dev->axi_data.src_info[VFE_PIX_0].width =
			input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
		if (input_cfg->d.pix_cfg.camif_cfg.subsample_cfg.
			sof_counter_step > 0) {
			vfe_dev->axi_data.src_info[VFE_PIX_0].
				sof_counter_step = input_cfg->d.pix_cfg.
				camif_cfg.subsample_cfg.sof_counter_step;
		}
	} else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
		/* Fetch-engine input: width is the buffer stride. */
		vfe_dev->axi_data.src_info[VFE_PIX_0].width =
			input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
	}
	vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
			vfe_dev, &input_cfg->d.pix_cfg);
	vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_PIX_0);
	return rc;
}
499
500 static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
501         struct msm_vfe_input_cfg *input_cfg)
502 {
503         int rc = 0;
504         if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
505                 pr_err("%s: RAW%d path is active\n", __func__,
506                            input_cfg->input_src - VFE_RAW_0);
507                 return -EINVAL;
508         }
509
510         vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
511                 input_cfg->input_pix_clk;
512         vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
513                 vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
514         return rc;
515 }
516
517 int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
518 {
519         int rc = 0;
520         struct msm_vfe_input_cfg *input_cfg = arg;
521         long pixel_clock = 0;
522
523         switch (input_cfg->input_src) {
524         case VFE_PIX_0:
525                 rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
526                 break;
527         case VFE_RAW_0:
528         case VFE_RAW_1:
529         case VFE_RAW_2:
530                 rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
531                 break;
532         default:
533                 pr_err("%s: Invalid input source\n", __func__);
534                 rc = -EINVAL;
535         }
536
537         pixel_clock = input_cfg->input_pix_clk;
538         /*
539          * Only set rate to higher, do not lower higher
540          * rate needed by another input
541          */
542         if (pixel_clock > vfe_dev->vfe_clk_info[
543                                 vfe_dev->hw_info->vfe_clk_idx].clk_rate) {
544                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(
545                         vfe_dev,
546                         &pixel_clock);
547                 if (rc < 0) {
548                         pr_err("%s: clock set rate failed\n", __func__);
549                         return rc;
550                 }
551         }
552         return rc;
553 }
554
555 static int msm_isp_dual_hw_master_slave_sync(struct vfe_device *vfe_dev,
556                                                 void *arg)
557 {
558         int rc = 0;
559
560         struct msm_isp_dual_hw_master_slave_sync *link = arg;
561         unsigned long flags;
562         struct master_slave_resource_info *ms_res =
563                         &vfe_dev->common_data->ms_resource;
564         int i;
565         struct msm_vfe_src_info *src_info = NULL;
566
567         spin_lock_irqsave(
568                         &vfe_dev->common_data->common_dev_data_lock,
569                         flags);
570         ms_res->dual_sync_mode = link->sync_mode;
571         if (ms_res->dual_sync_mode == MSM_ISP_DUAL_CAM_ASYNC) {
572                 for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
573                         if (ms_res->src_info[i] == NULL)
574                                 continue;
575                         src_info = ms_res->src_info[i];
576                         if (src_info->dual_hw_ms_info.sync_state ==
577                                 MSM_ISP_DUAL_CAM_ASYNC)
578                                 continue;
579                         ms_res->active_src_mask &= ~(1 <<
580                                 src_info->dual_hw_ms_info.index);
581                         ms_res->src_sof_mask &= ~(1 <<
582                                 src_info->dual_hw_ms_info.index);
583                         src_info->dual_hw_ms_info.sync_state =
584                                 MSM_ISP_DUAL_CAM_ASYNC;
585                 }
586         }
587         spin_unlock_irqrestore(
588                         &vfe_dev->common_data->common_dev_data_lock,
589                         flags);
590         return rc;
591 }
592
593 static int msm_isp_set_dual_HW_master_slave_mode(
594         struct vfe_device *vfe_dev, void *arg)
595 {
596         int rc = 0, i;
597         struct msm_isp_set_dual_hw_ms_cmd *dual_hw_ms_cmd = NULL;
598         struct msm_vfe_src_info *src_info = NULL;
599         unsigned long flags;
600         struct master_slave_resource_info *ms_res =
601                         &vfe_dev->common_data->ms_resource;
602
603         if (!vfe_dev || !arg) {
604                 pr_err("%s: Error! Invalid input vfe_dev %pK arg %pK\n",
605                         __func__, vfe_dev, arg);
606                 return -EINVAL;
607         }
608
609         spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
610         dual_hw_ms_cmd = (struct msm_isp_set_dual_hw_ms_cmd *)arg;
611         vfe_dev->common_data->ms_resource.dual_hw_type = DUAL_HW_MASTER_SLAVE;
612         vfe_dev->vfe_ub_policy = MSM_WM_UB_EQUAL_SLICING;
613         if (dual_hw_ms_cmd->primary_intf < VFE_SRC_MAX) {
614                 ISP_DBG("%s: vfe %d primary_intf %d\n", __func__,
615                         vfe_dev->pdev->id, dual_hw_ms_cmd->primary_intf);
616                 src_info = &vfe_dev->axi_data.
617                         src_info[dual_hw_ms_cmd->primary_intf];
618                 src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
619                 src_info->dual_hw_ms_info.dual_hw_ms_type =
620                         dual_hw_ms_cmd->dual_hw_ms_type;
621                 src_info->dual_hw_ms_info.index = dual_hw_ms_cmd->
622                         primary_intf + VFE_SRC_MAX * vfe_dev->pdev->id;
623                 ms_res->src_info[src_info->dual_hw_ms_info.index] = src_info;
624                 ms_res->num_src++;
625                 if (dual_hw_ms_cmd->dual_hw_ms_type == MS_TYPE_MASTER) {
626                         ms_res->master_index = src_info->dual_hw_ms_info.index;
627                         ms_res->sof_delta_threshold =
628                                 dual_hw_ms_cmd->sof_delta_threshold;
629                 } else {
630                         ms_res->primary_slv_idx =
631                                 src_info->dual_hw_ms_info.index;
632                 }
633         }
634         ISP_DBG("%s: vfe %d num_src %d\n", __func__, vfe_dev->pdev->id,
635                 dual_hw_ms_cmd->num_src);
636         if (dual_hw_ms_cmd->num_src > VFE_SRC_MAX) {
637                 pr_err("%s: Error! Invalid num_src %d\n", __func__,
638                         dual_hw_ms_cmd->num_src);
639                 spin_unlock_irqrestore(&vfe_dev->common_data->
640                         common_dev_data_lock, flags);
641                 return -EINVAL;
642         }
643         /* This for loop is for non-primary intf to be marked with Master/Slave
644          * in order for frame id sync. But their timestamp is not saved.
645          * So no sof_info resource is allocated */
646         for (i = 0; i < dual_hw_ms_cmd->num_src; i++) {
647                 if (dual_hw_ms_cmd->input_src[i] >= VFE_SRC_MAX) {
648                         pr_err("%s: Error! Invalid SRC param %d\n", __func__,
649                                 dual_hw_ms_cmd->input_src[i]);
650                         spin_unlock_irqrestore(&vfe_dev->common_data->
651                                         common_dev_data_lock, flags);
652                         return -EINVAL;
653                 }
654                 ISP_DBG("%s: vfe %d src %d type %d\n", __func__,
655                         vfe_dev->pdev->id, dual_hw_ms_cmd->input_src[i],
656                         dual_hw_ms_cmd->dual_hw_ms_type);
657                 src_info = &vfe_dev->axi_data.
658                         src_info[dual_hw_ms_cmd->input_src[i]];
659                 src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
660                 src_info->dual_hw_ms_info.dual_hw_ms_type =
661                         dual_hw_ms_cmd->dual_hw_ms_type;
662                 src_info->dual_hw_ms_info.index = dual_hw_ms_cmd->
663                         input_src[i] + VFE_SRC_MAX * vfe_dev->pdev->id;
664                 ms_res->src_info[src_info->dual_hw_ms_info.index] = src_info;
665                 ms_res->num_src++;
666         }
667         spin_unlock_irqrestore(&vfe_dev->common_data->common_dev_data_lock,
668                                 flags);
669         return rc;
670 }
671
672 static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
673 {
674         int rc = 0;
675         uint32_t count = 0;
676         struct msm_vfe_cfg_cmd_list *proc_cmd =
677                 (struct msm_vfe_cfg_cmd_list *)arg;
678         struct msm_vfe_cfg_cmd_list cmd, cmd_next;
679
680         if (!vfe_dev || !arg) {
681                 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
682                         vfe_dev, arg);
683                 return -EINVAL;
684         }
685
686         rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
687         if (rc < 0)
688                 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
689
690         cmd = *proc_cmd;
691
692         while (cmd.next) {
693                 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
694                         pr_err("%s:%d failed: next size %u != expected %zu\n",
695                                 __func__, __LINE__, cmd.next_size,
696                                 sizeof(struct msm_vfe_cfg_cmd_list));
697                         break;
698                 }
699                 if (++count >= MAX_ISP_REG_LIST) {
700                         pr_err("%s:%d Error exceeding the max register count:%u\n",
701                                 __func__, __LINE__, count);
702                         rc = -EINVAL;
703                         break;
704                 }
705                 if (copy_from_user(&cmd_next, (void __user *)cmd.next,
706                         sizeof(struct msm_vfe_cfg_cmd_list))) {
707                         rc = -EFAULT;
708                         continue;
709                 }
710
711                 rc = msm_isp_proc_cmd(vfe_dev, &cmd_next.cfg_cmd);
712                 if (rc < 0)
713                         pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
714
715                 cmd = cmd_next;
716         }
717         return rc;
718 }
719
#ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct msm_vfe_cfg_cmd2: pointers are carried
 * as compat_caddr_t and converted with compat_ptr() before use.
 */
struct msm_vfe_cfg_cmd2_32 {
	uint16_t num_cfg;
	uint16_t cmd_len;
	compat_caddr_t cfg_data;
	compat_caddr_t cfg_cmd;
};

/* 32-bit layout of struct msm_vfe_cfg_cmd_list (linked-list node). */
struct msm_vfe_cfg_cmd_list_32 {
	struct msm_vfe_cfg_cmd2_32   cfg_cmd;
	compat_caddr_t               next;
	uint32_t                     next_size;
};

/*
 * Compat ioctl numbers: same 'V'/command slots as the native
 * VIDIOC_MSM_VFE_REG_CFG / VIDIOC_MSM_VFE_REG_LIST_CFG, but sized for the
 * 32-bit structs so _IOWR encodes the matching payload length.
 */
#define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
#define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
	_IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
738
739 static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
740         struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
741 {
742         proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
743         proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
744         proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
745         proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
746 }
747
748 static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
749 {
750         int rc = 0;
751         uint32_t count = 0;
752         struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
753                 (struct msm_vfe_cfg_cmd_list_32 *)arg;
754         struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
755         struct msm_vfe_cfg_cmd2 current_cmd;
756
757         if (!vfe_dev || !arg) {
758                 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
759                         vfe_dev, arg);
760                 return -EINVAL;
761         }
762         msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
763         rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
764         if (rc < 0)
765                 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
766
767         cmd = *proc_cmd;
768
769         while (NULL != compat_ptr(cmd.next)) {
770                 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
771                         pr_err("%s:%d failed: next size %u != expected %zu\n",
772                                 __func__, __LINE__, cmd.next_size,
773                                 sizeof(struct msm_vfe_cfg_cmd_list));
774                         break;
775                 }
776                 if (++count >= MAX_ISP_REG_LIST) {
777                         pr_err("%s:%d Error exceeding the max register count:%u\n",
778                                 __func__, __LINE__, count);
779                         rc = -EINVAL;
780                         break;
781                 }
782                 if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
783                         sizeof(struct msm_vfe_cfg_cmd_list_32))) {
784                         rc = -EFAULT;
785                         continue;
786                 }
787
788                 msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
789                 rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
790                 if (rc < 0)
791                         pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
792
793                 cmd = cmd_next;
794         }
795         return rc;
796 }
797
/*
 * Dispatch a VFE config-command list to the handler matching the
 * calling task's ABI: the compat path for 32-bit userspace, the
 * native path otherwise.
 */
static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
{
        return is_compat_task() ?
                msm_isp_proc_cmd_list_compat(vfe_dev, arg) :
                msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
}
805 #else /* CONFIG_COMPAT */
/* Non-compat kernels: only the native command-list handler exists. */
static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
{
        return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
}
810 #endif /* CONFIG_COMPAT */
811
/*
 * Native-ABI ioctl handler for the VFE subdev.
 *
 * Serialization model:
 *  - realtime_mutex guards short, latency-sensitive work (register
 *    config via msm_isp_proc_cmd/_list);
 *  - buf_mgr->lock guards buffer-manager operations;
 *  - core_mutex guards long-running stream/stats/engine control that
 *    may block on hardware;
 *  - stream/stats commands additionally take VFE1's core_mutex through
 *    MSM_ISP_DUAL_VFE_MUTEX_LOCK when this is VFE0 of a split (dual-ISP)
 *    session, so both ISPs are reconfigured atomically.
 *
 * Returns 0 on success or a negative errno.
 */
static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
        unsigned int cmd, void *arg)
{
        long rc = 0;
        long rc2 = 0;
        struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);

        if (!vfe_dev || !vfe_dev->vfe_base) {
                pr_err("%s:%d failed: invalid params %pK\n",
                        __func__, __LINE__, vfe_dev);
                if (vfe_dev)
                        pr_err("%s:%d failed %pK\n", __func__,
                                __LINE__, vfe_dev->vfe_base);
                return -EINVAL;
        }

        /* use real time mutex for hard real-time ioctls such as
         * buffer operations and register updates.
         * Use core mutex for other ioctls that could take
         * longer time to complete such as start/stop ISP streams
         * which blocks until the hardware start/stop streaming
         */
        ISP_DBG("%s: cmd: %d\n", __func__, _IOC_TYPE(cmd));
        switch (cmd) {
        case VIDIOC_MSM_VFE_REG_CFG: {
                mutex_lock(&vfe_dev->realtime_mutex);
                rc = msm_isp_proc_cmd(vfe_dev, arg);
                mutex_unlock(&vfe_dev->realtime_mutex);
                break;
        }
        case VIDIOC_MSM_VFE_REG_LIST_CFG: {
                mutex_lock(&vfe_dev->realtime_mutex);
                rc = msm_isp_proc_cmd_list(vfe_dev, arg);
                mutex_unlock(&vfe_dev->realtime_mutex);
                break;
        }
        /* All buffer commands share one handler under buf_mgr->lock. */
        case VIDIOC_MSM_ISP_REQUEST_BUF:
        case VIDIOC_MSM_ISP_REQUEST_BUF_VER2:
                /* fallthrough */
        case VIDIOC_MSM_ISP_ENQUEUE_BUF:
                /* fallthrough */
        case VIDIOC_MSM_ISP_DEQUEUE_BUF:
                /* fallthrough */
        case VIDIOC_MSM_ISP_UNMAP_BUF: {
                mutex_lock(&vfe_dev->buf_mgr->lock);
                rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
                mutex_unlock(&vfe_dev->buf_mgr->lock);
                break;
        }
        case VIDIOC_MSM_ISP_RELEASE_BUF: {
                /* RELEASE may arrive after teardown; tolerate NULL buf_mgr. */
                if (vfe_dev->buf_mgr == NULL) {
                        pr_err("%s: buf mgr NULL! rc = -1\n", __func__);
                        rc = -EINVAL;
                        return rc;
                }
                mutex_lock(&vfe_dev->buf_mgr->lock);
                rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
                mutex_unlock(&vfe_dev->buf_mgr->lock);
                break;
        }
        case VIDIOC_MSM_ISP_REQUEST_STREAM:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                rc = msm_isp_request_axi_stream(vfe_dev, arg);
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_RELEASE_STREAM:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                rc = msm_isp_release_axi_stream(vfe_dev, arg);
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_CFG_STREAM:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_AXI_HALT:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                rc = msm_isp_axi_halt(vfe_dev, arg);
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_AXI_RESET:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                /* No reset while an enforced halt (overflow recovery) holds. */
                if (atomic_read(&vfe_dev->error_info.overflow_state)
                        != HALT_ENFORCED) {
                        rc = msm_isp_stats_reset(vfe_dev);
                        rc2 = msm_isp_axi_reset(vfe_dev, arg);
                        /* Report the AXI error only if stats reset succeeded. */
                        if (!rc && rc2)
                                rc = rc2;
                } else {
                        pr_err_ratelimited("%s: no HW reset, halt enforced.\n",
                                __func__);
                }
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_AXI_RESTART:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                /* Likewise, no restart while a halt is being enforced. */
                if (atomic_read(&vfe_dev->error_info.overflow_state)
                        != HALT_ENFORCED) {
                        rc = msm_isp_stats_restart(vfe_dev);
                        rc2 = msm_isp_axi_restart(vfe_dev, arg);
                        if (!rc && rc2)
                                rc = rc2;
                } else {
                        pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
                                __func__);
                }
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_INPUT_CFG:
                mutex_lock(&vfe_dev->core_mutex);
                rc = msm_isp_cfg_input(vfe_dev, arg);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_AHB_CLK_CFG:
                mutex_lock(&vfe_dev->core_mutex);
                /* Optional op: not all VFE generations support AHB clk cfg. */
                if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
                        rc = vfe_dev->hw_info->vfe_ops.core_ops.
                                        ahb_clk_cfg(vfe_dev, arg);
                else
                        rc = -EOPNOTSUPP;
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE:
                mutex_lock(&vfe_dev->core_mutex);
                rc = msm_isp_set_dual_HW_master_slave_mode(vfe_dev, arg);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_DUAL_HW_MASTER_SLAVE_SYNC:
                mutex_lock(&vfe_dev->core_mutex);
                rc = msm_isp_dual_hw_master_slave_sync(vfe_dev, arg);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_DUAL_HW_LPM_MODE:
                mutex_lock(&vfe_dev->core_mutex);
                rc = msm_isp_ab_ib_update_lpm_mode(vfe_dev, arg);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_FETCH_ENG_START:
        case VIDIOC_MSM_ISP_MAP_BUF_START_FE:
                mutex_lock(&vfe_dev->core_mutex);
                rc = msm_isp_start_fetch_engine(vfe_dev, arg);
                mutex_unlock(&vfe_dev->core_mutex);
                break;

        case VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START:
        case VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE:
                mutex_lock(&vfe_dev->core_mutex);
                rc = msm_isp_start_fetch_engine_multi_pass(vfe_dev, arg);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
                /* NULL arg is silently ignored; rc stays 0 in that case. */
                if (arg) {
                        enum msm_vfe_input_src frame_src =
                                *((enum msm_vfe_input_src *)arg);
                        vfe_dev->hw_info->vfe_ops.core_ops.
                                reg_update(vfe_dev, frame_src);
                }
                break;
        case VIDIOC_MSM_ISP_SET_SRC_STATE:
                mutex_lock(&vfe_dev->core_mutex);
                rc = msm_isp_set_src_state(vfe_dev, arg);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                rc = msm_isp_request_stats_stream(vfe_dev, arg);
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                rc = msm_isp_release_stats_stream(vfe_dev, arg);
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                rc = msm_isp_update_stats_stream(vfe_dev, arg);
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_UPDATE_STREAM:
                mutex_lock(&vfe_dev->core_mutex);
                MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
                rc = msm_isp_update_axi_stream(vfe_dev, arg);
                MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case VIDIOC_MSM_ISP_SMMU_ATTACH:
                mutex_lock(&vfe_dev->core_mutex);
                rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
                mutex_unlock(&vfe_dev->core_mutex);
                break;
        case MSM_SD_NOTIFY_FREEZE:
                /* Re-arm the per-source debug log counters. */
                vfe_dev->isp_sof_debug = 0;
                vfe_dev->isp_raw0_debug = 0;
                vfe_dev->isp_raw1_debug = 0;
                vfe_dev->isp_raw2_debug = 0;
                break;
        case MSM_SD_UNNOTIFY_FREEZE:
        case MSM_SD_SHUTDOWN:
                /* Intentional no-ops for this subdev. */
                break;

        default:
                pr_err_ratelimited("%s: Invalid ISP command %d\n", __func__,
                                    cmd);
                rc = -EINVAL;
        }
        return rc;
}
1044
1045
1046 #ifdef CONFIG_COMPAT
1047 static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
1048         unsigned int cmd, void *arg)
1049 {
1050         struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
1051         long rc = 0;
1052
1053         if (!vfe_dev || !vfe_dev->vfe_base) {
1054                 pr_err("%s:%d failed: invalid params %pK\n",
1055                         __func__, __LINE__, vfe_dev);
1056                 if (vfe_dev)
1057                         pr_err("%s:%d failed %pK\n", __func__,
1058                                 __LINE__, vfe_dev->vfe_base);
1059                 return -EINVAL;
1060         }
1061
1062         switch (cmd) {
1063         case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
1064                 struct msm_vfe_cfg_cmd2 proc_cmd;
1065                 mutex_lock(&vfe_dev->realtime_mutex);
1066                 msm_isp_compat_to_proc_cmd(&proc_cmd,
1067                         (struct msm_vfe_cfg_cmd2_32 *) arg);
1068                 rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
1069                 mutex_unlock(&vfe_dev->realtime_mutex);
1070                 break;
1071         }
1072         case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
1073                 mutex_lock(&vfe_dev->realtime_mutex);
1074                 rc = msm_isp_proc_cmd_list(vfe_dev, arg);
1075                 mutex_unlock(&vfe_dev->realtime_mutex);
1076                 break;
1077         }
1078         default:
1079                 return msm_isp_ioctl_unlocked(sd, cmd, arg);
1080         }
1081
1082         return rc;
1083 }
1084
/*
 * Public VFE subdev ioctl entry (CONFIG_COMPAT builds): always routed
 * through the compat handler, which falls back to the native path for
 * non-compat commands.
 */
long msm_isp_ioctl(struct v4l2_subdev *sd,
        unsigned int cmd, void *arg)
{
        return msm_isp_ioctl_compat(sd, cmd, arg);
}
1090 #else /* CONFIG_COMPAT */
/* Public VFE subdev ioctl entry (non-compat builds): native path only. */
long msm_isp_ioctl(struct v4l2_subdev *sd,
        unsigned int cmd, void *arg)
{
        return msm_isp_ioctl_unlocked(sd, cmd, arg);
}
1096 #endif /* CONFIG_COMPAT */
1097
1098 static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
1099         struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
1100         uint32_t *cfg_data, uint32_t cmd_len)
1101 {
1102         if (!vfe_dev || !reg_cfg_cmd) {
1103                 pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
1104                         __LINE__, vfe_dev, reg_cfg_cmd);
1105                 return -EINVAL;
1106         }
1107         if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
1108                 (!cfg_data || !cmd_len)) {
1109                 pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
1110                         __func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
1111                         cmd_len);
1112                 return -EINVAL;
1113         }
1114
1115         /* Validate input parameters */
1116         switch (reg_cfg_cmd->cmd_type) {
1117         case VFE_WRITE:
1118         case VFE_READ:
1119         case VFE_WRITE_MB: {
1120                 if ((reg_cfg_cmd->u.rw_info.reg_offset >
1121                         (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1122                         ((reg_cfg_cmd->u.rw_info.reg_offset +
1123                         reg_cfg_cmd->u.rw_info.len) >
1124                         vfe_dev->vfe_base_size) ||
1125                         (reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
1126                         pr_err_ratelimited("%s:%d regoffset %d len %d res %d\n",
1127                                 __func__, __LINE__,
1128                                 reg_cfg_cmd->u.rw_info.reg_offset,
1129                                 reg_cfg_cmd->u.rw_info.len,
1130                                 (uint32_t)vfe_dev->vfe_base_size);
1131                         return -EINVAL;
1132                 }
1133
1134                 if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
1135                         (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1136                         ((reg_cfg_cmd->u.rw_info.cmd_data_offset +
1137                         reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
1138                         pr_err_ratelimited("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
1139                                 __func__, __LINE__,
1140                                 reg_cfg_cmd->u.rw_info.cmd_data_offset,
1141                                 reg_cfg_cmd->u.rw_info.len, cmd_len);
1142                         return -EINVAL;
1143                 }
1144                 break;
1145         }
1146
1147         case VFE_WRITE_DMI_16BIT:
1148         case VFE_WRITE_DMI_32BIT:
1149         case VFE_WRITE_DMI_64BIT:
1150         case VFE_READ_DMI_16BIT:
1151         case VFE_READ_DMI_32BIT:
1152         case VFE_READ_DMI_64BIT: {
1153                 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
1154                         reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1155                         if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
1156                                 reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
1157                                 (reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
1158                                 reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
1159                                 (sizeof(uint32_t)))) {
1160                                 pr_err("%s:%d hi %d lo %d\n",
1161                                         __func__, __LINE__,
1162                                         reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1163                                         reg_cfg_cmd->u.dmi_info.hi_tbl_offset);
1164                                 return -EINVAL;
1165                         }
1166                         if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
1167                                 pr_err("%s:%d len %d\n",
1168                                         __func__, __LINE__,
1169                                         reg_cfg_cmd->u.dmi_info.len);
1170                                 return -EINVAL;
1171                         }
1172                         if (((UINT_MAX -
1173                                 reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
1174                                 (reg_cfg_cmd->u.dmi_info.len -
1175                                 sizeof(uint32_t))) ||
1176                                 ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
1177                                 reg_cfg_cmd->u.dmi_info.len -
1178                                 sizeof(uint32_t)) > cmd_len)) {
1179                                 pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
1180                                         __func__, __LINE__,
1181                                         reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1182                                         reg_cfg_cmd->u.dmi_info.len, cmd_len);
1183                                 return -EINVAL;
1184                         }
1185                 }
1186                 if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
1187                         (UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
1188                         ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
1189                         reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
1190                         pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
1191                                 __func__, __LINE__,
1192                                 reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
1193                                 reg_cfg_cmd->u.dmi_info.len, cmd_len);
1194                         return -EINVAL;
1195                 }
1196                 break;
1197         }
1198
1199         default:
1200                 break;
1201         }
1202
1203         switch (reg_cfg_cmd->cmd_type) {
1204         case VFE_WRITE: {
1205                 msm_camera_io_memcpy(vfe_dev->vfe_base +
1206                         reg_cfg_cmd->u.rw_info.reg_offset,
1207                         cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
1208                         reg_cfg_cmd->u.rw_info.len);
1209                 break;
1210         }
1211         case VFE_WRITE_MB: {
1212                 msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
1213                         reg_cfg_cmd->u.rw_info.reg_offset,
1214                         cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
1215                         reg_cfg_cmd->u.rw_info.len);
1216                 break;
1217         }
1218         case VFE_CFG_MASK: {
1219                 uint32_t temp;
1220                 bool grab_lock;
1221                 unsigned long flags;
1222                 if ((UINT_MAX - sizeof(temp) <
1223                         reg_cfg_cmd->u.mask_info.reg_offset) ||
1224                         (vfe_dev->vfe_base_size <
1225                         reg_cfg_cmd->u.mask_info.reg_offset +
1226                         sizeof(temp)) ||
1227                         (reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
1228                         pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
1229                         return -EINVAL;
1230                 }
1231                 grab_lock = vfe_dev->hw_info->vfe_ops.core_ops.
1232                         is_module_cfg_lock_needed(reg_cfg_cmd->
1233                         u.mask_info.reg_offset);
1234                 if (grab_lock)
1235                         spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
1236                 temp = msm_camera_io_r(vfe_dev->vfe_base +
1237                         reg_cfg_cmd->u.mask_info.reg_offset);
1238
1239                 temp &= ~reg_cfg_cmd->u.mask_info.mask;
1240                 temp |= reg_cfg_cmd->u.mask_info.val;
1241                 msm_camera_io_w(temp, vfe_dev->vfe_base +
1242                         reg_cfg_cmd->u.mask_info.reg_offset);
1243                 if (grab_lock)
1244                         spin_unlock_irqrestore(&vfe_dev->shared_data_lock,
1245                                 flags);
1246                 break;
1247         }
1248         case VFE_WRITE_DMI_16BIT:
1249         case VFE_WRITE_DMI_32BIT:
1250         case VFE_WRITE_DMI_64BIT: {
1251                 int i;
1252                 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1253                 uint32_t hi_val, lo_val, lo_val1;
1254                 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
1255                         hi_tbl_ptr = cfg_data +
1256                                 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1257                 }
1258                 lo_tbl_ptr = cfg_data +
1259                         reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
1260                 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
1261                         reg_cfg_cmd->u.dmi_info.len =
1262                                 reg_cfg_cmd->u.dmi_info.len / 2;
1263                 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1264                         lo_val = *lo_tbl_ptr++;
1265                         if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
1266                                 lo_val1 = lo_val & 0x0000FFFF;
1267                                 lo_val = (lo_val & 0xFFFF0000)>>16;
1268                                 msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
1269                                         vfe_dev->hw_info->dmi_reg_offset + 0x4);
1270                         } else if (reg_cfg_cmd->cmd_type ==
1271                                            VFE_WRITE_DMI_64BIT) {
1272                                 lo_tbl_ptr++;
1273                                 hi_val = *hi_tbl_ptr;
1274                                 hi_tbl_ptr = hi_tbl_ptr + 2;
1275                                 msm_camera_io_w(hi_val, vfe_dev->vfe_base +
1276                                         vfe_dev->hw_info->dmi_reg_offset);
1277                         }
1278                         msm_camera_io_w(lo_val, vfe_dev->vfe_base +
1279                                 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1280                 }
1281                 break;
1282         }
1283         case VFE_READ_DMI_16BIT:
1284         case VFE_READ_DMI_32BIT:
1285         case VFE_READ_DMI_64BIT: {
1286                 int i;
1287                 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1288                 uint32_t hi_val, lo_val, lo_val1;
1289                 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1290                         hi_tbl_ptr = cfg_data +
1291                                 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1292                 }
1293
1294                 lo_tbl_ptr = cfg_data +
1295                         reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
1296
1297                 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
1298                         reg_cfg_cmd->u.dmi_info.len =
1299                                 reg_cfg_cmd->u.dmi_info.len / 2;
1300
1301                 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1302                         lo_val = msm_camera_io_r(vfe_dev->vfe_base +
1303                                         vfe_dev->hw_info->dmi_reg_offset + 0x4);
1304
1305                         if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
1306                                 lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
1307                                         vfe_dev->hw_info->dmi_reg_offset + 0x4);
1308                                 lo_val |= lo_val1 << 16;
1309                         }
1310                         *lo_tbl_ptr++ = lo_val;
1311                         if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1312                                 hi_val = msm_camera_io_r(vfe_dev->vfe_base +
1313                                         vfe_dev->hw_info->dmi_reg_offset);
1314                                 *hi_tbl_ptr = hi_val;
1315                                 hi_tbl_ptr += 2;
1316                                 lo_tbl_ptr++;
1317                         }
1318                 }
1319                 break;
1320         }
1321         case VFE_HW_UPDATE_LOCK: {
1322                 uint32_t update_id =
1323                         vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
1324                 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
1325                         || update_id == *cfg_data) {
1326                         pr_err("%s hw update lock failed acq %d, cur id %u, last id %u\n",
1327                                 __func__,
1328                                 *cfg_data,
1329                                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
1330                                 update_id);
1331                         return -EINVAL;
1332                 }
1333                 break;
1334         }
1335         case VFE_HW_UPDATE_UNLOCK: {
1336                 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
1337                         != *cfg_data) {
1338                         pr_err("hw update across frame boundary,begin id %u, end id %d\n",
1339                                 *cfg_data,
1340                                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
1341                 }
1342                 vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
1343                         vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
1344                 break;
1345         }
1346         case VFE_READ: {
1347                 int i;
1348                 uint32_t *data_ptr = cfg_data +
1349                         reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
1350                 for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
1351                         if ((data_ptr < cfg_data) ||
1352                                 (UINT_MAX / sizeof(*data_ptr) <
1353                                  (data_ptr - cfg_data)) ||
1354                                 (sizeof(*data_ptr) * (data_ptr - cfg_data) >=
1355                                  cmd_len))
1356                                 return -EINVAL;
1357                         *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
1358                                 reg_cfg_cmd->u.rw_info.reg_offset);
1359                         reg_cfg_cmd->u.rw_info.reg_offset += 4;
1360                 }
1361                 break;
1362         }
1363         case GET_MAX_CLK_RATE: {
1364                 int rc = 0;
1365                 unsigned long rate;
1366
1367                 if (cmd_len != sizeof(__u32)) {
1368                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1369                                 __func__, __LINE__, cmd_len,
1370                                 sizeof(__u32));
1371                         return -EINVAL;
1372                 }
1373                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_max_clk_rate(
1374                                                         vfe_dev, &rate);
1375                 if (rc < 0) {
1376                         pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
1377                         return -EINVAL;
1378                 }
1379
1380                 *(__u32 *)cfg_data = (__u32)rate;
1381
1382                 break;
1383         }
1384         case GET_CLK_RATES: {
1385                 int rc = 0;
1386                 struct msm_isp_clk_rates rates;
1387                 struct msm_isp_clk_rates *user_data =
1388                         (struct msm_isp_clk_rates *)cfg_data;
1389                 if (cmd_len != sizeof(struct msm_isp_clk_rates)) {
1390                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1391                                 __func__, __LINE__, cmd_len,
1392                                 sizeof(struct msm_isp_clk_rates));
1393                         return -EINVAL;
1394                 }
1395                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(
1396                                                         vfe_dev, &rates);
1397                 if (rc < 0) {
1398                         pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
1399                         return -EINVAL;
1400                 }
1401                 user_data->svs_rate = rates.svs_rate;
1402                 user_data->nominal_rate = rates.nominal_rate;
1403                 user_data->high_rate = rates.high_rate;
1404                 break;
1405         }
1406         case GET_ISP_ID: {
1407                 uint32_t *isp_id = NULL;
1408
1409                 if (cmd_len < sizeof(uint32_t)) {
1410                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1411                                 __func__, __LINE__, cmd_len,
1412                                 sizeof(uint32_t));
1413                         return -EINVAL;
1414                 }
1415
1416                 isp_id = (uint32_t *)cfg_data;
1417                 *isp_id = vfe_dev->pdev->id;
1418                 break;
1419         }
1420         case SET_WM_UB_SIZE:
1421                 break;
1422         case SET_UB_POLICY: {
1423
1424                 if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
1425                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1426                                 __func__, __LINE__, cmd_len,
1427                                 sizeof(vfe_dev->vfe_ub_policy));
1428                         return -EINVAL;
1429                 }
1430                 vfe_dev->vfe_ub_policy = *cfg_data;
1431                 break;
1432         }
1433         case GET_VFE_HW_LIMIT: {
1434                 uint32_t *hw_limit = NULL;
1435
1436                 if (cmd_len < sizeof(uint32_t)) {
1437                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1438                                 __func__, __LINE__, cmd_len,
1439                                 sizeof(uint32_t));
1440                         return -EINVAL;
1441                 }
1442
1443                 hw_limit = (uint32_t *)cfg_data;
1444                 *hw_limit = vfe_dev->vfe_hw_limit;
1445                 break;
1446         }
1447         }
1448         return 0;
1449 }
1450
1451 int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
1452 {
1453         int rc = 0, i;
1454         struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
1455         struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
1456         uint32_t *cfg_data = NULL;
1457
1458         if (!proc_cmd->num_cfg) {
1459                 pr_err("%s: Passed num_cfg as 0\n", __func__);
1460                 return -EINVAL;
1461         }
1462
1463         reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
1464                 proc_cmd->num_cfg, GFP_KERNEL);
1465         if (!reg_cfg_cmd) {
1466                 pr_err("%s: reg_cfg alloc failed\n", __func__);
1467                 rc = -ENOMEM;
1468                 goto reg_cfg_failed;
1469         }
1470
1471         if (copy_from_user(reg_cfg_cmd,
1472                 (void __user *)(proc_cmd->cfg_cmd),
1473                 sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
1474                 rc = -EFAULT;
1475                 goto copy_cmd_failed;
1476         }
1477
1478         if (proc_cmd->cmd_len > 0) {
1479                 cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
1480                 if (!cfg_data) {
1481                         pr_err("%s: cfg_data alloc failed\n", __func__);
1482                         rc = -ENOMEM;
1483                         goto cfg_data_failed;
1484                 }
1485
1486                 if (copy_from_user(cfg_data,
1487                         (void __user *)(proc_cmd->cfg_data),
1488                         proc_cmd->cmd_len)) {
1489                         rc = -EFAULT;
1490                         goto copy_cmd_failed;
1491                 }
1492         }
1493
1494         for (i = 0; i < proc_cmd->num_cfg; i++)
1495                 rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
1496                         cfg_data, proc_cmd->cmd_len);
1497
1498         if (copy_to_user(proc_cmd->cfg_data,
1499                         cfg_data, proc_cmd->cmd_len)) {
1500                 rc = -EFAULT;
1501                 goto copy_cmd_failed;
1502         }
1503
1504 copy_cmd_failed:
1505         kfree(cfg_data);
1506 cfg_data_failed:
1507         kfree(reg_cfg_cmd);
1508 reg_cfg_failed:
1509         return rc;
1510 }
1511
1512 int msm_isp_send_event(struct vfe_device *vfe_dev,
1513         uint32_t event_type,
1514         struct msm_isp_event_data *event_data)
1515 {
1516         struct v4l2_event isp_event;
1517         memset(&isp_event, 0, sizeof(struct v4l2_event));
1518         isp_event.id = 0;
1519         isp_event.type = event_type;
1520         memcpy(&isp_event.u.data[0], event_data,
1521                 sizeof(struct msm_isp_event_data));
1522         v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
1523         return 0;
1524 }
1525
1526 #define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
1527
/*
 * msm_isp_cal_word_per_line - Convert a line width in pixels to a word
 * count for the given V4L2/MSM output format.
 *
 * CAL_WORD(width, M, N) computes ceil(width * M / N): for each format a
 * group of N pixels occupies M words (presumably 64-bit bus words, e.g.
 * 8 packed 8bpp pixels per word — TODO confirm against the VFE HW docs).
 *
 * Returns the per-line word count, or -1 for an unrecognized format
 * (an error is logged via msm_isp_print_fourcc_error()).
 */
int msm_isp_cal_word_per_line(uint32_t output_format,
	uint32_t pixel_per_line)
{
	int val = -1;
	switch (output_format) {
	/* 8bpp packed formats: 1 word per 8 pixels */
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_META:
	case V4L2_PIX_FMT_GREY:
		val = CAL_WORD(pixel_per_line, 1, 8);
		break;
	/* 10bpp MIPI-packed formats: 5 words per 32 pixels */
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_META10:
		val = CAL_WORD(pixel_per_line, 5, 32);
		break;
	/* 12bpp MIPI-packed: 3 words per 16 pixels */
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
		val = CAL_WORD(pixel_per_line, 3, 16);
		break;
	/* 14bpp MIPI-packed: 7 words per 32 pixels */
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
		val = CAL_WORD(pixel_per_line, 7, 32);
		break;
	/* Qualcomm-packed 10/12/14bpp variants */
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
		val = CAL_WORD(pixel_per_line, 1, 6);
		break;
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
		val = CAL_WORD(pixel_per_line, 1, 5);
		break;
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		val = CAL_WORD(pixel_per_line, 1, 4);
		break;
	/* YUV semi-planar: luma plane, 8bpp */
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV14:
	case V4L2_PIX_FMT_NV41:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		val = CAL_WORD(pixel_per_line, 1, 8);
		break;
	/* Interleaved YUV 4:2:2: 16 bits per pixel */
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
		val = CAL_WORD(pixel_per_line, 2, 8);
	break;
	/* Plain16 10-bit: each pixel stored in 16 bits */
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
		val = CAL_WORD(pixel_per_line, 1, 4);
	break;
	case V4L2_PIX_FMT_NV24:
	case V4L2_PIX_FMT_NV42:
		val = CAL_WORD(pixel_per_line, 1, 8);
	break;
		/*TD: Add more image format*/
	default:
		msm_isp_print_fourcc_error(__func__, output_format);
		break;
	}
	return val;
}
1622
/*
 * msm_isp_get_pack_format - Map a V4L2/MSM fourcc to the VFE write
 * master packing mode.
 *
 * Returns MIPI for CSI-packed bayer formats, QCOM for Qualcomm-packed
 * (Q-prefixed) formats, PLAIN16 for 16-bit-per-pixel (P16) formats, and
 * -EINVAL (cast to the enum) for anything unrecognized.
 */
enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
{
	switch (output_format) {
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
		return MIPI;
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		return QCOM;
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
		return PLAIN16;
	default:
		msm_isp_print_fourcc_error(__func__, output_format);
		break;
	}
	return -EINVAL;
}
1679
/*
 * msm_isp_get_bit_per_pixel - Return the bits-per-pixel for a given
 * V4L2/MSM fourcc output format.
 *
 * Returns the bit depth (4..24) or -EINVAL for unknown formats.
 * NOTE(review): the default path prints the unknown fourcc twice (once
 * via msm_isp_print_fourcc_error(), once via pr_err()) — likely
 * redundant, kept as-is here.
 */
int msm_isp_get_bit_per_pixel(uint32_t output_format)
{
	switch (output_format) {
	case V4L2_PIX_FMT_Y4:
		return 4;
	case V4L2_PIX_FMT_Y6:
		return 6;
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_META:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV14:
	case V4L2_PIX_FMT_NV41:
	case V4L2_PIX_FMT_YVU410:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YYUV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_YUV411P:
	case V4L2_PIX_FMT_Y41P:
	case V4L2_PIX_FMT_YUV444:
	case V4L2_PIX_FMT_YUV555:
	case V4L2_PIX_FMT_YUV565:
	case V4L2_PIX_FMT_YUV32:
	case V4L2_PIX_FMT_YUV410:
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_GREY:
	case V4L2_PIX_FMT_PAL8:
	case V4L2_PIX_FMT_UV8:
	case MSM_V4L2_PIX_FMT_META:
		return 8;
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
	case V4L2_PIX_FMT_Y10:
	case V4L2_PIX_FMT_Y10BPACK:
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
	case V4L2_PIX_FMT_META10:
	case MSM_V4L2_PIX_FMT_META10:
		return 10;
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
	case V4L2_PIX_FMT_Y12:
		return 12;
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		return 14;
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_Y16:
		return 16;
	case V4L2_PIX_FMT_NV24:
	case V4L2_PIX_FMT_NV42:
		return 24;
		/*TD: Add more image format*/
	default:
		msm_isp_print_fourcc_error(__func__, output_format);
		pr_err("%s: Invalid output format %x\n",
			__func__, output_format);
		return -EINVAL;
	}
}
1781
1782 void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
1783 {
1784         struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1785         error_info->info_dump_frame_count++;
1786 }
1787
1788
/*
 * msm_isp_process_iommu_page_fault - Handle an SMMU page fault on the
 * VFE.
 *
 * If an AXI overflow is pending (overflow IRQ bit set, or recovery
 * already in progress), the fault is treated as a side effect of the
 * overflow and the existing ping-pong-mismatch recovery path is used
 * instead of a fatal error. Otherwise the AXI bridge is halted and a
 * fatal IOMMU page-fault event is sent to userspace.
 *
 * Returns the pagefault_debug_disable value sampled at entry (non-zero
 * when the one-shot debug dump below has already run).
 */
static int msm_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
{
	int rc = vfe_dev->buf_mgr->pagefault_debug_disable;
	uint32_t irq_status0, irq_status1;
	uint32_t overflow_mask;
	unsigned long irq_flags;

	/* Check if any overflow bit is set */
	vfe_dev->hw_info->vfe_ops.core_ops.
		get_overflow_mask(&overflow_mask);
	vfe_dev->hw_info->vfe_ops.irq_ops.
		read_irq_status(vfe_dev, &irq_status0, &irq_status1);
	overflow_mask &= irq_status1;
	/* overflow_state must be checked under the common dev lock */
	spin_lock_irqsave(
		&vfe_dev->common_data->common_dev_data_lock, irq_flags);
	if (overflow_mask ||
		atomic_read(&vfe_dev->error_info.overflow_state) !=
			NO_OVERFLOW) {
		spin_unlock_irqrestore(
			&vfe_dev->common_data->common_dev_data_lock, irq_flags);
		pr_err_ratelimited("%s: overflow detected during IOMMU\n",
			__func__);
		/* Don't treat the Overflow + Page fault scenario as fatal.
		 * Instead try to do a recovery. Using an existing event as
		 * as opposed to creating a new event.
		 */
		msm_isp_halt_send_error(vfe_dev, ISP_EVENT_PING_PONG_MISMATCH);
	} else {
		spin_unlock_irqrestore(
			&vfe_dev->common_data->common_dev_data_lock, irq_flags);
		pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %pK\n",
			__func__, __LINE__,  vfe_dev->pdev->id, vfe_dev);
		/* Stop AXI traffic before reporting the fatal fault */
		vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0);
		msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);
	}

	/* One-shot: dump buffer-manager and WM ping/pong state for the
	 * faulting address, then disable further dumps.
	 */
	if (vfe_dev->buf_mgr->pagefault_debug_disable == 0) {
		vfe_dev->buf_mgr->pagefault_debug_disable = 1;
		vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
			vfe_dev->page_fault_addr);
		msm_isp_print_ping_pong_address(vfe_dev,
			vfe_dev->page_fault_addr);
		vfe_dev->hw_info->vfe_ops.axi_ops.
			read_wm_ping_pong_addr(vfe_dev);
	}
	return rc;
}
1836
1837 void msm_isp_process_error_info(struct vfe_device *vfe_dev)
1838 {
1839         struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1840
1841         if (error_info->error_count == 1 ||
1842                 !(error_info->info_dump_frame_count % 100)) {
1843                 vfe_dev->hw_info->vfe_ops.core_ops.
1844                         process_error_status(vfe_dev);
1845                 error_info->error_mask0 = 0;
1846                 error_info->error_mask1 = 0;
1847                 error_info->camif_status = 0;
1848                 error_info->violation_status = 0;
1849         }
1850 }
1851
1852 static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
1853         uint32_t error_mask0, uint32_t error_mask1)
1854 {
1855         vfe_dev->error_info.error_mask0 |= error_mask0;
1856         vfe_dev->error_info.error_mask1 |= error_mask1;
1857         vfe_dev->error_info.error_count++;
1858 }
1859
/*
 * msm_isp_process_overflow_irq - Detect an AXI bus overflow and start
 * recovery.
 *
 * Called from the hard IRQ path with the raw status registers. On
 * overflow (or when @force_overflow is set) it atomically claims the
 * recovery (NO_OVERFLOW -> OVERFLOW_DETECTED), halts this VFE (and its
 * dual-VFE peer when split), masks the IRQ status so no further
 * processing happens this cycle, and notifies userspace with an
 * ISP_EVENT_ERROR / ISP_ERROR_BUS_OVERFLOW event.
 *
 * Returns 1 when an overflow was claimed and handled here (caller must
 * stop processing these IRQs), 0 otherwise. May also clear bits in
 * *irq_status0/*irq_status1 as a side effect.
 */
int msm_isp_process_overflow_irq(
	struct vfe_device *vfe_dev,
	uint32_t *irq_status0, uint32_t *irq_status1,
	uint8_t force_overflow)
{
	uint32_t overflow_mask;
	uint32_t bus_err = 0;
	unsigned long flags;

	/* if there are no active streams - do not start recovery */
	if (!vfe_dev->axi_data.num_active_stream)
		return 0;

	/* Optional hook: latch bus error bits before they are cleared */
	if (vfe_dev->hw_info->vfe_ops.core_ops.
		get_bus_err_mask)
		vfe_dev->hw_info->vfe_ops.core_ops.get_bus_err_mask(
			vfe_dev, &bus_err, irq_status1);
	/* Mask out all other irqs if recovery is started */
	if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
		uint32_t halt_restart_mask0, halt_restart_mask1;
		vfe_dev->hw_info->vfe_ops.core_ops.
		get_halt_restart_mask(&halt_restart_mask0,
			&halt_restart_mask1);
		*irq_status0 &= halt_restart_mask0;
		*irq_status1 &= halt_restart_mask1;

		return 0;
	}

	/* Check if any overflow bit is set */
	vfe_dev->hw_info->vfe_ops.core_ops.
		get_overflow_mask(&overflow_mask);
	overflow_mask &= *irq_status1;

	if (overflow_mask || force_overflow) {
		struct msm_isp_event_data error_event;
		int i;
		struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;

		spin_lock_irqsave(
			&vfe_dev->common_data->common_dev_data_lock, flags);

		/* cmpxchg claims the recovery exactly once; a non-zero
		 * return means another path already owns it.
		 */
		if (atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
				NO_OVERFLOW, OVERFLOW_DETECTED)) {
			spin_unlock_irqrestore(
				 &vfe_dev->common_data->common_dev_data_lock,
				 flags);
			return 0;
		}

		if (vfe_dev->reset_pending == 1) {
			pr_err_ratelimited("%s:%d overflow %x during reset\n",
				__func__, __LINE__, overflow_mask);
			/* Clear overflow bits since reset is pending */
			*irq_status1 &= ~overflow_mask;
			spin_unlock_irqrestore(
				 &vfe_dev->common_data->common_dev_data_lock,
				 flags);
			return 0;
		}
		pr_err_ratelimited("%s: vfe %d overflowmask %x,bus_error %x\n",
			__func__, vfe_dev->pdev->id, overflow_mask, bus_err);
		/* Log which write masters were active for debugging */
		for (i = 0; i < axi_data->hw_info->num_wm; i++) {
			if (!axi_data->free_wm[i])
				continue;
			ISP_DBG("%s:wm %d assigned to stream handle %x\n",
				__func__, i, axi_data->free_wm[i]);
		}
		/* Save the live IRQ masks so recovery can restore them */
		vfe_dev->recovery_irq0_mask = vfe_dev->irq0_mask;
		vfe_dev->recovery_irq1_mask = vfe_dev->irq1_mask;
		vfe_dev->hw_info->vfe_ops.core_ops.
				set_halt_restart_mask(vfe_dev);
		vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0);
		/* mask off other vfe if dual vfe is used */
		if (vfe_dev->is_split) {
			int other_vfe_id;
			struct vfe_device *temp_vfe;

			other_vfe_id = (vfe_dev->pdev->id == ISP_VFE0) ?
				ISP_VFE1 : ISP_VFE0;
			temp_vfe = vfe_dev->common_data->
				dual_vfe_res->vfe_dev[other_vfe_id];

			atomic_set(&temp_vfe->error_info.overflow_state,
				OVERFLOW_DETECTED);
			temp_vfe->recovery_irq0_mask = temp_vfe->irq0_mask;
			temp_vfe->recovery_irq1_mask = temp_vfe->irq1_mask;
			temp_vfe->hw_info->vfe_ops.core_ops.
				set_halt_restart_mask(temp_vfe);
			temp_vfe->hw_info->vfe_ops.axi_ops.halt(temp_vfe, 0);
		}

		/* reset irq status so skip further process */
		*irq_status0 = 0;
		*irq_status1 = 0;

		/* Only notify if a forced halt has not superseded this */
		if (atomic_read(&vfe_dev->error_info.overflow_state)
			!= HALT_ENFORCED) {
			memset(&error_event, 0, sizeof(error_event));
			error_event.frame_id =
				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
			error_event.u.error_info.err_type =
				ISP_ERROR_BUS_OVERFLOW;
			msm_isp_send_event(vfe_dev,
				ISP_EVENT_ERROR, &error_event);
		}
		spin_unlock_irqrestore(
			&vfe_dev->common_data->common_dev_data_lock,
			flags);
		return 1;
	}
	return 0;
}
1973
1974 void msm_isp_reset_burst_count_and_frame_drop(
1975         struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
1976 {
1977         if (stream_info->state != ACTIVE ||
1978                 stream_info->stream_type != BURST_STREAM) {
1979                 return;
1980         }
1981         if (stream_info->num_burst_capture != 0)
1982                 msm_isp_reset_framedrop(vfe_dev, stream_info);
1983 }
1984
/*
 * msm_isp_prepare_irq_debug_info - Record one IRQ occurrence in the
 * shared circular IRQ debug dump.
 *
 * Captures this VFE's IRQ status, timestamp, CPU and ping/pong status;
 * in dual-VFE (split) mode it also reads the peer VFE's live status
 * registers into the same entry. The dump buffer and its index are
 * shared across VFEs, hence the common_dev_irq_dump_lock.
 */
void msm_isp_prepare_irq_debug_info(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1)
{

	unsigned long flags;
	struct msm_vfe_irq_debug_info *irq_debug;
	uint8_t current_index;

	spin_lock_irqsave(&vfe_dev->common_data->vfe_irq_dump.
		common_dev_irq_dump_lock, flags);
	/* Fill current VFE debug info */
	current_index = vfe_dev->common_data->vfe_irq_dump.
		current_irq_index % MAX_VFE_IRQ_DEBUG_DUMP_SIZE;
	irq_debug = &vfe_dev->common_data->vfe_irq_dump.
		irq_debug[current_index];
	irq_debug->vfe_id = vfe_dev->pdev->id;
	irq_debug->core_id = smp_processor_id();
	msm_isp_get_timestamp(&irq_debug->ts, vfe_dev);
	irq_debug->irq_status0[vfe_dev->pdev->id] = irq_status0;
	irq_debug->irq_status1[vfe_dev->pdev->id] = irq_status1;
	irq_debug->ping_pong_status[vfe_dev->pdev->id] =
		vfe_dev->hw_info->vfe_ops.axi_ops.
			get_pingpong_status(vfe_dev);
	/* Peer VFE is indexed by !pdev->id (valid for a 2-VFE system);
	 * only read it when it exists and is open.
	 */
	if (vfe_dev->is_split &&
		(vfe_dev->common_data->
		dual_vfe_res->vfe_dev[!vfe_dev->pdev->id])
		&& (vfe_dev->common_data->dual_vfe_res->
		vfe_dev[!vfe_dev->pdev->id]->vfe_open_cnt)) {
		/* Fill other VFE debug Info */
		vfe_dev->hw_info->vfe_ops.irq_ops.read_irq_status(
			vfe_dev->common_data->dual_vfe_res->
			vfe_dev[!vfe_dev->pdev->id],
			&irq_debug->irq_status0[!vfe_dev->pdev->id],
			&irq_debug->irq_status1[!vfe_dev->pdev->id]);
		irq_debug->ping_pong_status[!vfe_dev->pdev->id] =
			vfe_dev->hw_info->vfe_ops.axi_ops.
			get_pingpong_status(vfe_dev->common_data->
			dual_vfe_res->vfe_dev[!vfe_dev->pdev->id]);
	}
	vfe_dev->common_data->vfe_irq_dump.current_irq_index++;
	spin_unlock_irqrestore(&vfe_dev->common_data->vfe_irq_dump.
		common_dev_irq_dump_lock, flags);
}
2028
2029 void msm_isp_prepare_tasklet_debug_info(struct vfe_device *vfe_dev,
2030         uint32_t irq_status0, uint32_t irq_status1,
2031         struct msm_isp_timestamp ts)
2032 {
2033         struct msm_vfe_irq_debug_info *irq_debug;
2034         uint8_t current_index;
2035         unsigned long flags;
2036
2037         spin_lock_irqsave(&vfe_dev->common_data->vfe_irq_dump.
2038                 common_dev_tasklet_dump_lock, flags);
2039         current_index = vfe_dev->common_data->vfe_irq_dump.
2040                 current_tasklet_index % MAX_VFE_IRQ_DEBUG_DUMP_SIZE;
2041         irq_debug = &vfe_dev->common_data->vfe_irq_dump.
2042                 tasklet_debug[current_index];
2043         irq_debug->vfe_id = vfe_dev->pdev->id;
2044         irq_debug->core_id = smp_processor_id();
2045         irq_debug->ts = ts;
2046         irq_debug->irq_status0[vfe_dev->pdev->id] = irq_status0;
2047         irq_debug->irq_status1[vfe_dev->pdev->id] = irq_status1;
2048         irq_debug->ping_pong_status[vfe_dev->pdev->id] =
2049                 vfe_dev->hw_info->vfe_ops.axi_ops.
2050                 get_pingpong_status(vfe_dev);
2051         vfe_dev->common_data->vfe_irq_dump.current_tasklet_index++;
2052         spin_unlock_irqrestore(&vfe_dev->common_data->vfe_irq_dump.
2053                 common_dev_tasklet_dump_lock, flags);
2054 }
2055
/*
 * msm_isp_enqueue_tasklet_cmd - Queue IRQ status for deferred handling.
 *
 * Picks the per-VFE tasklet (or the shared MAX_VFE tasklet when this
 * device runs in split/dual-VFE mode), fills the next slot of its fixed
 * ring of queue commands, appends it to the tasklet list and schedules
 * the tasklet. Drops the event (with a ratelimited error) if the ring
 * slot is still in use, i.e. the tasklet has fallen behind.
 */
static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1)
{
	unsigned long flags;
	struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;
	struct msm_vfe_tasklet *tasklet;

	/* split mode funnels both VFEs into one common tasklet */
	if (vfe_dev->is_split)
		tasklet = &vfe_dev->common_data->tasklets[MAX_VFE];
	else
		tasklet = &vfe_dev->common_data->tasklets[vfe_dev->pdev->id];

	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
	queue_cmd = &tasklet->tasklet_queue_cmd[tasklet->taskletq_idx];
	if (queue_cmd->cmd_used) {
		/* Ring overflow: the consumer hasn't freed this slot yet */
		pr_err_ratelimited("%s: Tasklet queue overflow: %d\n",
			__func__, vfe_dev->pdev->id);
		spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
		return;
	} else {
		atomic_add(1, &vfe_dev->irq_cnt);
	}
	queue_cmd->vfeInterruptStatus0 = irq_status0;
	queue_cmd->vfeInterruptStatus1 = irq_status1;
	msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);

	queue_cmd->cmd_used = 1;
	queue_cmd->vfe_dev = vfe_dev;

	/* advance ring write index */
	tasklet->taskletq_idx = (tasklet->taskletq_idx + 1) %
		MSM_VFE_TASKLETQ_SIZE;
	list_add_tail(&queue_cmd->list, &tasklet->tasklet_q);
	spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
	tasklet_schedule(&tasklet->tasklet);
}
2091
/*
 * msm_isp_process_irq - Top-half VFE interrupt handler.
 *
 * Reads and clears the IRQ status registers, lets the overflow path
 * claim the interrupt if a bus overflow is detected, separates error
 * bits from normal status bits, and defers all remaining work to the
 * tasklet via msm_isp_enqueue_tasklet_cmd().
 *
 * Always returns IRQ_HANDLED (the status registers were read/cleared).
 */
irqreturn_t msm_isp_process_irq(int irq_num, void *data)
{
	struct vfe_device *vfe_dev = (struct vfe_device *) data;
	uint32_t irq_status0, irq_status1;
	uint32_t error_mask0, error_mask1;

	vfe_dev->hw_info->vfe_ops.irq_ops.
		read_and_clear_irq_status(vfe_dev, &irq_status0, &irq_status1);

	/* Spurious interrupt: nothing latched */
	if ((irq_status0 == 0) && (irq_status1 == 0)) {
		ISP_DBG("%s:VFE%d irq_status0 & 1 are both 0\n",
			__func__, vfe_dev->pdev->id);
		return IRQ_HANDLED;
	}
	/* Optional HW hook that must run before generic processing */
	if (vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq) {
		vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq(
				vfe_dev, irq_status0);
	}
	if (msm_isp_process_overflow_irq(vfe_dev,
		&irq_status0, &irq_status1, 0)) {
		/* if overflow initiated no need to handle the interrupts */
		pr_err("overflow processed\n");
		return IRQ_HANDLED;
	}

	/* Split status into error bits (accumulated for later dump) and
	 * normal bits (handed to the tasklet).
	 */
	vfe_dev->hw_info->vfe_ops.core_ops.
		get_error_mask(&error_mask0, &error_mask1);
	error_mask0 &= irq_status0;
	error_mask1 &= irq_status1;
	irq_status0 &= ~error_mask0;
	irq_status1 &= ~error_mask1;
	if ((error_mask0 != 0) || (error_mask1 != 0))
		msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);

	/* Skip tasklet unless there is normal work, or this is the very
	 * first error (which must trigger the error dump path).
	 */
	if ((irq_status0 == 0) && (irq_status1 == 0) &&
		(!(((error_mask0 != 0) || (error_mask1 != 0)) &&
		 vfe_dev->error_info.error_count == 1))) {
		ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
		return IRQ_HANDLED;
	}
	msm_isp_prepare_irq_debug_info(vfe_dev, irq_status0, irq_status1);
	msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1);

	return IRQ_HANDLED;
}
2137
/*
 * msm_isp_do_tasklet - Bottom-half processing of queued VFE interrupts.
 * @data: the msm_vfe_tasklet this tasklet instance drains.
 *
 * Drains the tasklet queue one command at a time, copying each command's
 * payload out under the queue lock and then dispatching the IRQ status to
 * the per-hardware irq_ops handlers in a fixed order: reset, halt,
 * (overflow-recovery bailout), error info, stats, AXI, CAMIF, reg-update,
 * epoch. Runs until the queue is empty.
 */
void msm_isp_do_tasklet(unsigned long data)
{
	unsigned long flags;
	struct msm_vfe_tasklet *tasklet = (struct msm_vfe_tasklet *)data;
	struct vfe_device *vfe_dev;
	struct msm_vfe_irq_ops *irq_ops;
	struct msm_vfe_tasklet_queue_cmd *queue_cmd;
	struct msm_isp_timestamp ts;
	uint32_t irq_status0, irq_status1;

	while (1) {
		spin_lock_irqsave(&tasklet->tasklet_lock, flags);
		queue_cmd = list_first_entry_or_null(&tasklet->tasklet_q,
			struct msm_vfe_tasklet_queue_cmd, list);
		if (!queue_cmd) {
			/* Queue drained: done for this scheduling. */
			spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
			break;
		}
		list_del_init(&queue_cmd->list);
		vfe_dev = queue_cmd->vfe_dev;
		/* Copy the payload and release the slot while still holding
		 * the lock, so the IRQ handler can reuse it immediately.
		 */
		queue_cmd->cmd_used = 0;
		queue_cmd->vfe_dev = NULL;
		irq_status0 = queue_cmd->vfeInterruptStatus0;
		irq_status1 = queue_cmd->vfeInterruptStatus1;
		ts = queue_cmd->ts;
		spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
		/* Device was closed between enqueue and tasklet run: drop. */
		if (vfe_dev->vfe_open_cnt == 0) {
			pr_err("%s: VFE%d open cnt = %d, irq %x/%x\n",
			__func__, vfe_dev->pdev->id, vfe_dev->vfe_open_cnt,
			irq_status0, irq_status1);
			continue;
		}
		atomic_sub(1, &vfe_dev->irq_cnt);
		msm_isp_prepare_tasklet_debug_info(vfe_dev,
			irq_status0, irq_status1, ts);
		irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
		/* Reset/halt acks must be processed even during recovery. */
		irq_ops->process_reset_irq(vfe_dev,
			irq_status0, irq_status1);
		irq_ops->process_halt_irq(vfe_dev,
			irq_status0, irq_status1);
		if (atomic_read(&vfe_dev->error_info.overflow_state)
			!= NO_OVERFLOW) {
			/* Overflow recovery in progress: suppress normal
			 * event dispatch for this command.
			 */
			ISP_DBG("%s: Recovery in processing, Ignore IRQs!!!\n",
				__func__);
			continue;
		}
		msm_isp_process_error_info(vfe_dev);
		irq_ops->process_stats_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_axi_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_camif_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_reg_update(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_epoch_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
	}
}
2197
2198 int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
2199 {
2200         struct msm_vfe_axi_src_state *src_state = arg;
2201         if (src_state->input_src >= VFE_SRC_MAX)
2202                 return -EINVAL;
2203         vfe_dev->axi_data.src_info[src_state->input_src].active =
2204         src_state->src_active;
2205         vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
2206         src_state->src_frame_id;
2207         return 0;
2208 }
2209
2210 static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
2211         struct device *dev, unsigned long iova, int flags, void *token)
2212 {
2213         struct vfe_device *vfe_dev = NULL;
2214
2215         if (token) {
2216                 vfe_dev = (struct vfe_device *)token;
2217                 vfe_dev->page_fault_addr = iova;
2218                 if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops ||
2219                         !vfe_dev->axi_data.num_active_stream) {
2220                         pr_err("%s:%d buf_mgr %pK active strms %d\n", __func__,
2221                                 __LINE__, vfe_dev->buf_mgr,
2222                                 vfe_dev->axi_data.num_active_stream);
2223                         goto end;
2224                 }
2225
2226                 mutex_lock(&vfe_dev->core_mutex);
2227                 if (vfe_dev->vfe_open_cnt > 0) {
2228                         pr_err_ratelimited("%s: fault address is %lx\n",
2229                                 __func__, iova);
2230                         msm_isp_process_iommu_page_fault(vfe_dev);
2231                 } else {
2232                         pr_err("%s: no handling, vfe open cnt = %d\n",
2233                                 __func__, vfe_dev->vfe_open_cnt);
2234                 }
2235                 mutex_unlock(&vfe_dev->core_mutex);
2236         } else {
2237                 ISP_DBG("%s:%d] no token received: %pK\n",
2238                         __func__, __LINE__, token);
2239                 goto end;
2240         }
2241 end:
2242         return;
2243 }
2244
/*
 * msm_isp_open_node - v4l2 subdev open for a VFE node.
 * @sd: subdev whose private data is the vfe_device.
 * @fh: subdev file handle (unused).
 *
 * First open performs full hardware bring-up: init_hw, status clear,
 * reset, init_hw_reg, buffer-manager init, shared-state reset, and SMMU
 * page-fault handler registration. Subsequent opens only bump the
 * reference count. Returns 0 on success, -EINVAL on probe/reset failure,
 * -EBUSY if hardware init fails.
 */
int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
	long rc = 0;

	ISP_DBG("%s open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);

	/* Probe must have populated common_data and dual-VFE resources. */
	if (vfe_dev->common_data == NULL ||
		vfe_dev->common_data->dual_vfe_res == NULL) {
		pr_err("%s: Error in probe. No common_data or dual vfe res\n",
			__func__);
		return -EINVAL;
	}

	mutex_lock(&vfe_dev->realtime_mutex);
	mutex_lock(&vfe_dev->core_mutex);

	/* Already open: just take another reference. */
	if (vfe_dev->vfe_open_cnt++) {
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return 0;
	}

	vfe_dev->reset_pending = 0;
	vfe_dev->isp_sof_debug = 0;
	vfe_dev->isp_raw0_debug = 0;
	vfe_dev->isp_raw1_debug = 0;
	vfe_dev->isp_raw2_debug = 0;

	if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
		pr_err("%s: init hardware failed\n", __func__);
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EBUSY;
	}

	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
	atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);

	vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);

	vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
	ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
	/* Blocking reset (1, 1); <= 0 means the reset ack timed out. */
	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
	if (rc <= 0) {
		pr_err("%s: reset timeout\n", __func__);
		vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EINVAL;
	}

	vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);

	vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr,
		"msm_isp");

	/* Reset all per-session shared state for a fresh first open. */
	memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
	memset(&vfe_dev->stats_data, 0,
		sizeof(struct msm_vfe_stats_shared_data));
	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
	memset(&vfe_dev->fetch_engine_info, 0,
		sizeof(vfe_dev->fetch_engine_info));
	vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
	vfe_dev->axi_data.enable_frameid_recovery = 0;
	vfe_dev->vt_enable = 0;
	vfe_dev->reg_update_requested = 0;
	/* Register page fault handler */
	vfe_dev->buf_mgr->pagefault_debug_disable = 0;
	/* initialize pd_buf_idx with an invalid index 0xF */
	vfe_dev->common_data->pd_buf_idx = 0xF;

	cam_smmu_reg_client_page_fault_handler(
			vfe_dev->buf_mgr->iommu_hdl,
			msm_vfe_iommu_fault_handler,
			NULL,
			vfe_dev);
	mutex_unlock(&vfe_dev->core_mutex);
	mutex_unlock(&vfe_dev->realtime_mutex);
	return 0;
}
2328
#ifdef CONFIG_MSM_AVTIMER
/* Re-enable AV timer power collapse when video-timestamping ends. */
void msm_isp_end_avtimer(void)
{
	avcs_core_disable_power_collapse(0);
}
#else
/* Stub when the kernel is built without AV timer support. */
void msm_isp_end_avtimer(void)
{
	pr_err("AV Timer is not supported\n");
}
#endif
2340
/*
 * msm_isp_close_node - v4l2 subdev close for a VFE node.
 * @sd: subdev whose private data is the vfe_device.
 * @fh: subdev file handle (unused).
 *
 * Drops one reference; the last close performs full teardown in strict
 * order: release streams, unregister the SMMU fault handler, halt the
 * AXI bridge, disable CAMIF, reset, park scratch buffers in all write
 * masters, release hardware, and deinit the buffer manager. Returns 0
 * on success, -EINVAL if called with no open reference.
 */
int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	long rc = 0;
	int wm;
	int i;
	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
	ISP_DBG("%s E open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
	mutex_lock(&vfe_dev->realtime_mutex);
	mutex_lock(&vfe_dev->core_mutex);

	if (!vfe_dev->vfe_open_cnt) {
		pr_err("%s invalid state open cnt %d\n", __func__,
			vfe_dev->vfe_open_cnt);
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EINVAL;
	}

	/* Not the last reference: just drop the count. */
	if (vfe_dev->vfe_open_cnt > 1) {
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return 0;
	}
	/* In split (dual-VFE) mode, also hold VFE1's core mutex. */
	MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
	msm_isp_release_all_axi_stream(vfe_dev);
	msm_isp_release_all_stats_stream(vfe_dev);

	/* Unregister page fault handler */
	cam_smmu_reg_client_page_fault_handler(
		vfe_dev->buf_mgr->iommu_hdl,
		NULL, NULL, vfe_dev);

	/* Blocking halt (1); <= 0 means the halt ack timed out. */
	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
	if (rc <= 0)
		pr_err("%s: halt timeout rc=%ld\n", __func__, rc);

	vfe_dev->hw_info->vfe_ops.core_ops.
		update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
	vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 0);

	/* put scratch buf in all the wm */
	for (wm = 0; wm < vfe_dev->axi_data.hw_info->num_wm; wm++) {
		msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PING_FLAG);
		msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PONG_FLAG);
	}
	vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
	/* after regular hw stop, reduce open cnt */
	vfe_dev->vfe_open_cnt--;
	vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
	if (vfe_dev->vt_enable) {
		msm_isp_end_avtimer();
		vfe_dev->vt_enable = 0;
	}
	for (i = 0; i < VFE_SRC_MAX; i++)
		vfe_dev->axi_data.src_info[i].lpm = 0;
	MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
	vfe_dev->is_split = 0;

	mutex_unlock(&vfe_dev->core_mutex);
	mutex_unlock(&vfe_dev->realtime_mutex);
	return 0;
}
2404
2405 void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
2406 {
2407         unsigned long flags;
2408         int i;
2409         struct msm_vfe_tasklet_queue_cmd *queue_cmd, *q_cmd_next;
2410         struct msm_vfe_tasklet *tasklet;
2411
2412         for (i = 0; i <= MAX_VFE; i++) {
2413                 if (i != vfe_dev->pdev->id &&
2414                         i != MAX_VFE)
2415                         continue;
2416                 tasklet = &vfe_dev->common_data->tasklets[i];
2417                 spin_lock_irqsave(&tasklet->tasklet_lock, flags);
2418                 list_for_each_entry_safe(queue_cmd, q_cmd_next,
2419                         &tasklet->tasklet_q, list) {
2420                         if (queue_cmd->vfe_dev != vfe_dev)
2421                                 continue;
2422                         list_del_init(&queue_cmd->list);
2423                         queue_cmd->cmd_used = 0;
2424                 }
2425                 spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
2426                 tasklet_kill(&tasklet->tasklet);
2427         }
2428         atomic_set(&vfe_dev->irq_cnt, 0);
2429
2430         return;
2431 }
2432
2433 void msm_isp_irq_debug_dump(struct vfe_device *vfe_dev)
2434 {
2435
2436         uint8_t i, dump_index;
2437         unsigned long flags;
2438
2439         spin_lock_irqsave(&vfe_dev->common_data->vfe_irq_dump.
2440                 common_dev_irq_dump_lock, flags);
2441         dump_index = vfe_dev->common_data->vfe_irq_dump.
2442                 current_irq_index;
2443         for (i = 0; i < MAX_VFE_IRQ_DEBUG_DUMP_SIZE; i++) {
2444                 trace_msm_cam_ping_pong_debug_dump(
2445                         vfe_dev->common_data->vfe_irq_dump.
2446                         irq_debug[dump_index % MAX_VFE_IRQ_DEBUG_DUMP_SIZE]);
2447                 dump_index++;
2448         }
2449         spin_unlock_irqrestore(&vfe_dev->common_data->vfe_irq_dump.
2450                 common_dev_irq_dump_lock, flags);
2451 }
2452
2453
2454 void msm_isp_tasklet_debug_dump(struct vfe_device *vfe_dev)
2455 {
2456
2457         uint8_t i, dump_index;
2458         unsigned long flags;
2459
2460         spin_lock_irqsave(&vfe_dev->common_data->vfe_irq_dump.
2461                 common_dev_tasklet_dump_lock, flags);
2462         dump_index = vfe_dev->common_data->vfe_irq_dump.
2463                 current_tasklet_index;
2464         for (i = 0; i < MAX_VFE_IRQ_DEBUG_DUMP_SIZE; i++) {
2465                 trace_msm_cam_tasklet_debug_dump(
2466                         vfe_dev->common_data->vfe_irq_dump.
2467                         tasklet_debug[
2468                         dump_index % MAX_VFE_IRQ_DEBUG_DUMP_SIZE]);
2469                 dump_index++;
2470         }
2471         spin_unlock_irqrestore(&vfe_dev->common_data->vfe_irq_dump.
2472                 common_dev_tasklet_dump_lock, flags);
2473 }
2474
/*
 * msm_isp_dump_ping_pong_mismatch - Dump both debug rings on a
 * ping/pong buffer mismatch.
 * @vfe_dev: device whose IRQ and tasklet debug history is dumped.
 *
 * Emits banner strings and replays the IRQ and tasklet circular debug
 * buffers to ftrace for post-mortem analysis.
 */
void msm_isp_dump_ping_pong_mismatch(struct vfe_device *vfe_dev)
{

	trace_msm_cam_string(" ***** msm_isp_dump_irq_debug ****");
	msm_isp_irq_debug_dump(vfe_dev);
	trace_msm_cam_string(" ***** msm_isp_dump_taskelet_debug ****");
	msm_isp_tasklet_debug_dump(vfe_dev);
}