OSDN Git Service

Merge "ais: adjust frame id values to match user space of all platforms"
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / media / platform / msm / ais / isp / msm_isp_util.c
1 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <media/v4l2-subdev.h>
16 #include <linux/ratelimit.h>
17
18 #include "msm.h"
19 #include "msm_isp_util.h"
20 #include "msm_isp_axi_util.h"
21 #include "msm_isp_stats_util.h"
22 #include "msm_camera_io_util.h"
23 #include "cam_smmu_api.h"
24 #define CREATE_TRACE_POINTS
25 #include "trace/events/msm_cam.h"
26
27 #define MAX_ISP_V4l2_EVENTS 100
28 #define MAX_ISP_REG_LIST 100
29 #define MAX_ISP_CMD_NUM 10
30 #define MAX_ISP_CMD_LEN 4096
31 static DEFINE_MUTEX(bandwidth_mgr_mutex);
32 static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
33
34 static uint64_t msm_isp_cpp_clk_rate;
35 static struct dump_ping_pong_state dump_data;
36 static struct dump_ping_pong_state tasklet_data;
37 static DEFINE_SPINLOCK(dump_irq_lock);
38 static DEFINE_SPINLOCK(dump_tasklet_lock);
39
40 #define VFE40_8974V2_VERSION 0x1001001A
41
42 void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
43 {
44         int i;
45         char text[5];
46
47         text[4] = '\0';
48         for (i = 0; i < 4; i++) {
49                 text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
50                 if ((text[i] < '0') || (text[i] > 'z')) {
51                         pr_err("%s: Invalid output format %d (unprintable)\n",
52                                 origin, fourcc_format);
53                         return;
54                 }
55         }
56         pr_err("%s: Invalid output format %s\n",
57                 origin, text);
58 }
59
60 int msm_isp_init_bandwidth_mgr(struct vfe_device *vfe_dev,
61                         enum msm_isp_hw_client client)
62 {
63         int rc = 0;
64
65         mutex_lock(&bandwidth_mgr_mutex);
66         if (isp_bandwidth_mgr.client_info[client].active) {
67                 mutex_unlock(&bandwidth_mgr_mutex);
68                 return rc;
69         }
70         isp_bandwidth_mgr.client_info[client].active = 1;
71         isp_bandwidth_mgr.use_count++;
72         if (vfe_dev && !isp_bandwidth_mgr.bus_client) {
73                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.init_bw_mgr(vfe_dev,
74                                 &isp_bandwidth_mgr);
75                 if (!rc) {
76                         isp_bandwidth_mgr.update_bw =
77                         vfe_dev->hw_info->vfe_ops.platform_ops.update_bw;
78                         isp_bandwidth_mgr.deinit_bw_mgr =
79                         vfe_dev->hw_info->vfe_ops.platform_ops.deinit_bw_mgr;
80                 }
81         }
82         if (rc) {
83                 isp_bandwidth_mgr.use_count--;
84                 isp_bandwidth_mgr.client_info[client].active = 0;
85         }
86
87         mutex_unlock(&bandwidth_mgr_mutex);
88         return rc;
89 }
90
91 int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
92         uint64_t ab, uint64_t ib)
93 {
94         int rc;
95
96         mutex_lock(&bandwidth_mgr_mutex);
97         if (!isp_bandwidth_mgr.use_count ||
98                 !isp_bandwidth_mgr.bus_client) {
99                 pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
100                         __func__, isp_bandwidth_mgr.use_count,
101                         isp_bandwidth_mgr.bus_client);
102                 mutex_unlock(&bandwidth_mgr_mutex);
103                 return -EINVAL;
104         }
105
106         isp_bandwidth_mgr.client_info[client].ab = ab;
107         isp_bandwidth_mgr.client_info[client].ib = ib;
108         rc = isp_bandwidth_mgr.update_bw(&isp_bandwidth_mgr);
109         mutex_unlock(&bandwidth_mgr_mutex);
110         return 0;
111 }
112
113 void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
114 {
115         if (client >= MAX_ISP_CLIENT) {
116                 pr_err("invalid Client id %d", client);
117                 return;
118         }
119         mutex_lock(&bandwidth_mgr_mutex);
120         memset(&isp_bandwidth_mgr.client_info[client], 0,
121                         sizeof(struct msm_isp_bandwidth_info));
122         if (isp_bandwidth_mgr.use_count) {
123                 isp_bandwidth_mgr.use_count--;
124                 if (isp_bandwidth_mgr.use_count) {
125                         mutex_unlock(&bandwidth_mgr_mutex);
126                         return;
127                 }
128
129                 if (!isp_bandwidth_mgr.bus_client) {
130                         pr_err("%s:%d error: bus client invalid\n",
131                                 __func__, __LINE__);
132                         mutex_unlock(&bandwidth_mgr_mutex);
133                         return;
134                 }
135
136                 isp_bandwidth_mgr.deinit_bw_mgr(
137                                 &isp_bandwidth_mgr);
138         }
139         mutex_unlock(&bandwidth_mgr_mutex);
140 }
141
142 void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
143                                 struct msm_isp_statistics *stats)
144 {
145         stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
146         stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
147         stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
148
149         stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
150         stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
151         stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
152
153         stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
154         stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
155         stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
156         stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
157         stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
158         stats->vfe_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
159         stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
160 }
161
/* Cache the latest CPP clock rate; reported via
 * msm_isp_util_get_bandwidth_stats().
 */
void msm_isp_util_update_clk_rate(long clock_rate)
{
        msm_isp_cpp_clk_rate = clock_rate;
}
166
167 uint32_t msm_isp_get_framedrop_period(
168         enum msm_vfe_frame_skip_pattern frame_skip_pattern)
169 {
170         switch (frame_skip_pattern) {
171         case NO_SKIP:
172         case EVERY_2FRAME:
173         case EVERY_3FRAME:
174         case EVERY_4FRAME:
175         case EVERY_5FRAME:
176         case EVERY_6FRAME:
177         case EVERY_7FRAME:
178         case EVERY_8FRAME:
179                 return frame_skip_pattern + 1;
180         case EVERY_16FRAME:
181                 return 16;
182         case EVERY_32FRAME:
183                 return 32;
184         case SKIP_ALL:
185                 return 1;
186         default:
187                 return 1;
188         }
189         return 1;
190 }
191
/*
 * Fill in event and buffer timestamps for an ISP event.
 * event_time always carries gettimeofday() wall-clock time.
 * buf_time comes from the AV timer when video tunneling is enabled
 * (vt_enable), otherwise from the monotonic boottime clock with
 * nanoseconds converted to microseconds.
 */
void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
        struct vfe_device *vfe_dev)
{
        struct timespec ts;

        do_gettimeofday(&(time_stamp->event_time));
        if (vfe_dev->vt_enable) {
                /* AV-timer path: msm_isp_get_avtimer_ts() fills vt_time,
                 * which is then mirrored into buf_time.
                 */
                msm_isp_get_avtimer_ts(time_stamp);
                time_stamp->buf_time.tv_sec    = time_stamp->vt_time.tv_sec;
                time_stamp->buf_time.tv_usec   = time_stamp->vt_time.tv_usec;
        } else  {
                get_monotonic_boottime(&ts);
                time_stamp->buf_time.tv_sec    = ts.tv_sec;
                time_stamp->buf_time.tv_usec   = ts.tv_nsec/1000;
        }

}
209
210 static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
211 {
212         u32 evt_id = ISP_EVENT_SUBS_MASK_NONE;
213
214         switch (evt_mask) {
215         case ISP_EVENT_MASK_INDEX_STATS_NOTIFY:
216                 evt_id = ISP_EVENT_STATS_NOTIFY;
217                 break;
218         case ISP_EVENT_MASK_INDEX_ERROR:
219                 evt_id = ISP_EVENT_ERROR;
220                 break;
221         case ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT:
222                 evt_id = ISP_EVENT_IOMMU_P_FAULT;
223                 break;
224         case ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE:
225                 evt_id = ISP_EVENT_STREAM_UPDATE_DONE;
226                 break;
227         case ISP_EVENT_MASK_INDEX_REG_UPDATE:
228                 evt_id = ISP_EVENT_REG_UPDATE;
229                 break;
230         case ISP_EVENT_MASK_INDEX_SOF:
231                 evt_id = ISP_EVENT_SOF;
232                 break;
233         case ISP_EVENT_MASK_INDEX_BUF_DIVERT:
234                 evt_id = ISP_EVENT_BUF_DIVERT;
235                 break;
236         case ISP_EVENT_MASK_INDEX_BUF_DONE:
237                 evt_id = ISP_EVENT_BUF_DONE;
238                 break;
239         case ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY:
240                 evt_id = ISP_EVENT_COMP_STATS_NOTIFY;
241                 break;
242         case ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE:
243                 evt_id = ISP_EVENT_FE_READ_DONE;
244                 break;
245         case ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH:
246                 evt_id = ISP_EVENT_PING_PONG_MISMATCH;
247                 break;
248         case ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING:
249                 evt_id = ISP_EVENT_REG_UPDATE_MISSING;
250                 break;
251         case ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR:
252                 evt_id = ISP_EVENT_BUF_FATAL_ERROR;
253                 break;
254         default:
255                 evt_id = ISP_EVENT_SUBS_MASK_NONE;
256                 break;
257         }
258
259         return evt_id;
260 }
261
262 static inline int msm_isp_subscribe_event_mask(struct v4l2_fh *fh,
263                 struct v4l2_event_subscription *sub, int evt_mask_index,
264                 u32 evt_id, bool subscribe_flag)
265 {
266         int rc = 0, i, interface;
267
268         if (evt_mask_index == ISP_EVENT_MASK_INDEX_STATS_NOTIFY) {
269                 for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
270                         sub->type = evt_id + i;
271                         if (subscribe_flag)
272                                 rc = v4l2_event_subscribe(fh, sub,
273                                         MAX_ISP_V4l2_EVENTS, NULL);
274                         else
275                                 rc = v4l2_event_unsubscribe(fh, sub);
276                         if (rc != 0) {
277                                 pr_err("%s: Subs event_type =0x%x failed\n",
278                                         __func__, sub->type);
279                                 return rc;
280                         }
281                 }
282         } else if (evt_mask_index == ISP_EVENT_MASK_INDEX_SOF ||
283                    evt_mask_index == ISP_EVENT_MASK_INDEX_REG_UPDATE ||
284                    evt_mask_index == ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE) {
285                 for (interface = 0; interface < VFE_SRC_MAX; interface++) {
286                         sub->type = evt_id | interface;
287                         if (subscribe_flag)
288                                 rc = v4l2_event_subscribe(fh, sub,
289                                         MAX_ISP_V4l2_EVENTS, NULL);
290                         else
291                                 rc = v4l2_event_unsubscribe(fh, sub);
292                         if (rc != 0) {
293                                 pr_err("%s: Subs event_type =0x%x failed\n",
294                                         __func__, sub->type);
295                                 return rc;
296                         }
297                 }
298         } else {
299                 sub->type = evt_id;
300                 if (subscribe_flag)
301                         rc = v4l2_event_subscribe(fh, sub,
302                                 MAX_ISP_V4l2_EVENTS, NULL);
303                 else
304                         rc = v4l2_event_unsubscribe(fh, sub);
305                 if (rc != 0) {
306                         pr_err("%s: Subs event_type =0x%x failed\n",
307                                 __func__, sub->type);
308                         return rc;
309                 }
310         }
311         return rc;
312 }
313
314 static inline int msm_isp_process_event_subscription(struct v4l2_fh *fh,
315         struct v4l2_event_subscription *sub, bool subscribe_flag)
316 {
317         int rc = 0, evt_mask_index = 0;
318         u32 evt_mask = sub->type;
319         u32 evt_id = 0;
320
321         if (evt_mask == ISP_EVENT_SUBS_MASK_NONE) {
322                 pr_err("%s: Subs event_type is None=0x%x\n",
323                         __func__, evt_mask);
324                 return 0;
325         }
326
327         for (evt_mask_index = ISP_EVENT_MASK_INDEX_STATS_NOTIFY;
328                 evt_mask_index <= ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR;
329                 evt_mask_index++) {
330                 if (evt_mask & (1<<evt_mask_index)) {
331                         evt_id = msm_isp_evt_mask_to_isp_event(evt_mask_index);
332                         rc = msm_isp_subscribe_event_mask(fh, sub,
333                                 evt_mask_index, evt_id, subscribe_flag);
334                         if (rc != 0) {
335                                 pr_err("%s: Subs event index:%d failed\n",
336                                         __func__, evt_mask_index);
337                                 return rc;
338                         }
339                 }
340         }
341         return rc;
342 }
343
/* v4l2 subdev core op: subscribe the caller to the ISP event families
 * selected by the bitmask in sub->type.
 */
int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
        struct v4l2_event_subscription *sub)
{
        return msm_isp_process_event_subscription(fh, sub, true);
}
349
/* v4l2 subdev core op: unsubscribe the caller from the ISP event families
 * selected by the bitmask in sub->type.
 */
int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
        struct v4l2_event_subscription *sub)
{
        return msm_isp_process_event_subscription(fh, sub, false);
}
355
/*
 * Kick the offline fetch engine for a single pass: seed the PIX frame id
 * from the user-supplied config, then delegate to the hw-specific
 * start_fetch_eng op.
 */
static int msm_isp_start_fetch_engine(struct vfe_device *vfe_dev,
        void *arg)
{
        struct msm_vfe_fetch_eng_start *fe_cfg = arg;
        /*
         * For Offline VFE, HAL expects same frame id
         * for offline output which it requested in do_reprocess.
         */
        vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
                fe_cfg->frame_id;
        return vfe_dev->hw_info->vfe_ops.core_ops.
                start_fetch_eng(vfe_dev, arg);
}
369
/*
 * Kick the offline fetch engine for multi-pass reprocessing. On the second
 * pass the output stream's hardware state is re-armed first: the VFE is
 * reset, framedrop is reinitialized, the PING buffer address is programmed
 * from the requested buffer index, and the stream's write masters are
 * re-enabled and reloaded before delegating to start_fetch_eng_multi_pass.
 */
static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
        void *arg)
{
        struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
        struct msm_vfe_axi_stream *stream_info = NULL;
        int i = 0, rc;
        uint32_t wm_reload_mask = 0;
        /*
         * For Offline VFE, HAL expects same frame id
         * for offline output which it requested in do_reprocess.
         */
        vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
                fe_cfg->frame_id;

        if (fe_cfg->offline_pass == OFFLINE_SECOND_PASS) {
                stream_info = &vfe_dev->axi_data.stream_info[
                        HANDLE_TO_IDX(fe_cfg->output_stream_id)];
                /*
                 * NOTE(review): this NULL check can never fire --
                 * stream_info is the address of an array element. The
                 * actual hazard is HANDLE_TO_IDX() yielding an
                 * out-of-range index; confirm the handle is validated
                 * before this point (array bound not visible here).
                 */
                if (!stream_info) {
                        pr_err("%s: Couldn't find streamid 0x%X\n", __func__,
                                fe_cfg->output_stream_id);
                        return -EINVAL;
                }
                /* Blocking(0)/soft(1) reset flags per hw op contract --
                 * TODO confirm against reset_hw implementation.
                 */
                vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
                        0, 1);
                msm_isp_reset_framedrop(vfe_dev, stream_info);

                /* Program the requested output buffer on the PING slot. */
                rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
                        VFE_PING_FLAG, fe_cfg->output_buf_idx);
                if (rc < 0) {
                        pr_err("%s: Fetch engine config failed\n", __func__);
                        return -EINVAL;
                }
                /* Re-enable each plane's write master and collect the
                 * reload mask.
                 */
                for (i = 0; i < stream_info->num_planes; i++) {
                        vfe_dev->hw_info->vfe_ops.axi_ops.
                        enable_wm(vfe_dev->vfe_base, stream_info->wm[i],
                                        1);
                        wm_reload_mask |= (1 << stream_info->wm[i]);
                }
                vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
                        VFE_SRC_MAX);
                vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
                        vfe_dev->vfe_base, wm_reload_mask);
        }
        return vfe_dev->hw_info->vfe_ops.core_ops.
                start_fetch_eng_multi_pass(vfe_dev, arg);
}
416
417 void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
418         struct msm_vfe_fetch_engine_info *fetch_engine_info)
419 {
420         struct msm_isp_event_data fe_rd_done_event;
421
422         memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data));
423         fe_rd_done_event.frame_id =
424                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
425         fe_rd_done_event.u.fetch_done.session_id =
426                 fetch_engine_info->session_id;
427         fe_rd_done_event.u.fetch_done.stream_id = fetch_engine_info->stream_id;
428         fe_rd_done_event.u.fetch_done.handle = fetch_engine_info->bufq_handle;
429         fe_rd_done_event.u.fetch_done.buf_idx = fetch_engine_info->buf_idx;
430         fe_rd_done_event.u.fetch_done.fd = fetch_engine_info->fd;
431         fe_rd_done_event.u.fetch_done.offline_mode =
432                 fetch_engine_info->offline_mode;
433
434         ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
435                 __func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
436         fetch_engine_info->is_busy = 0;
437         msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
438 }
439
440 static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
441         struct msm_vfe_input_cfg *input_cfg)
442 {
443         int rc = 0;
444         struct msm_vfe_pix_cfg *pix_cfg = NULL;
445
446         pr_debug("%s: entry\n", __func__);
447
448         if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
449                 pr_err("%s: pixel path is active\n", __func__);
450                 return -EINVAL;
451         }
452
453         pix_cfg = &input_cfg->d.pix_cfg;
454
455         vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
456                 input_cfg->input_pix_clk;
457         vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
458                 input_cfg->d.pix_cfg.input_mux;
459         vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
460                 input_cfg->d.pix_cfg.input_format;
461         vfe_dev->axi_data.src_info[VFE_PIX_0].sof_counter_step = 1;
462
463         /*
464          * Fill pixel_clock into input_pix_clk so that user space
465          * can use rounded clk rate
466          */
467         input_cfg->input_pix_clk =
468                 vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock;
469
470         ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
471                 input_cfg->d.pix_cfg.input_mux, CAMIF,
472                 input_cfg->d.pix_cfg.input_format);
473
474         if (input_cfg->d.pix_cfg.input_mux == CAMIF ||
475                 input_cfg->d.pix_cfg.input_mux == TESTGEN) {
476                 vfe_dev->axi_data.src_info[VFE_PIX_0].width =
477                         input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
478                 if (input_cfg->d.pix_cfg.camif_cfg.subsample_cfg.
479                         sof_counter_step > 0) {
480                         vfe_dev->axi_data.src_info[VFE_PIX_0].
481                                 sof_counter_step = input_cfg->d.pix_cfg.
482                                 camif_cfg.subsample_cfg.sof_counter_step;
483                 }
484         } else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
485                 vfe_dev->axi_data.src_info[VFE_PIX_0].width =
486                         input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
487         }
488         vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
489                         vfe_dev, &input_cfg->d.pix_cfg);
490         vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_PIX_0);
491
492         pr_debug("%s: exit\n", __func__);
493
494         return rc;
495 }
496
497 static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
498         struct msm_vfe_input_cfg *input_cfg)
499 {
500         int rc = 0;
501
502         if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
503                 pr_err("%s: RAW%d path is active\n", __func__,
504                            input_cfg->input_src - VFE_RAW_0);
505                 return -EINVAL;
506         }
507
508         vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
509                 input_cfg->input_pix_clk;
510         vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
511                 vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
512         return rc;
513 }
514
515 static int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
516 {
517         int rc = 0;
518         struct msm_vfe_input_cfg *input_cfg = arg;
519         long pixel_clock = 0;
520
521         switch (input_cfg->input_src) {
522         case VFE_PIX_0:
523                 rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
524                 break;
525         case VFE_RAW_0:
526         case VFE_RAW_1:
527         case VFE_RAW_2:
528                 rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
529                 break;
530         default:
531                 pr_err("%s: Invalid input source\n", __func__);
532                 rc = -EINVAL;
533         }
534
535         pixel_clock = input_cfg->input_pix_clk;
536         rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
537                 &pixel_clock);
538         if (rc < 0) {
539                 pr_err("%s: clock set rate failed\n", __func__);
540                 return rc;
541         }
542         return rc;
543 }
544
545 static int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg)
546 {
547         int rc = 0;
548         struct msm_vfe_camif_cfg *camif_cfg = arg;
549         struct msm_vfe_input_cfg input_cfg;
550         long pixel_clock = 0;
551
552         pr_debug("%s: entry\n", __func__);
553
554         memset(&input_cfg, 0, sizeof(input_cfg));
555
556         input_cfg.input_src = VFE_PIX_0;
557         input_cfg.input_pix_clk = 320000000;
558         input_cfg.d.pix_cfg.camif_cfg = *camif_cfg;
559
560         /* populate values from operation cfg */
561         input_cfg.d.pix_cfg.input_mux =
562                 vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux;
563         input_cfg.d.pix_cfg.camif_cfg.camif_input =
564                 vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input;
565
566         rc = msm_isp_cfg_pix(vfe_dev, &input_cfg);
567
568         pixel_clock = input_cfg.input_pix_clk;
569         rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
570                 &pixel_clock);
571         if (rc < 0) {
572                 pr_err("%s: clock set rate failed\n", __func__);
573                 return rc;
574         }
575
576         pr_debug("%s: exit\n", __func__);
577
578         return rc;
579 }
580
581
582 static int msm_isp_operation_cfg(struct vfe_device *vfe_dev, void *arg)
583 {
584         struct msm_vfe_operation_cfg *op_cfg = arg;
585
586         pr_debug("%s: entry\n", __func__);
587
588         vfe_dev->hvx_cmd = op_cfg->hvx_cmd;
589         vfe_dev->is_split = 0; /* default to false */
590
591         /* yuv_cosited currently not used */
592         /* pixel input select not used */
593
594         vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
595                 op_cfg->input_mux;
596         vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_pattern =
597                 op_cfg->pixel_pattern;
598         vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input = op_cfg->camif_input;
599
600         pr_debug("%s: exit\n", __func__);
601
602         return 0;
603 }
604
605
/*
 * Put this VFE into dual-hardware master/slave mode.
 *
 * Marks the shared ms_resource as master/slave managed, selects equal-slicing
 * UB policy, binds the primary interface's sof_info either to the master slot
 * or to a free slave slot (reserved under the common device spinlock), and
 * tags every listed input source with the master/slave role so frame ids can
 * be synchronized.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -EBUSY if no free slave
 * slot is available.
 */
static int msm_isp_set_dual_HW_master_slave_mode(
        struct vfe_device *vfe_dev, void *arg)
{
        /*
         * This method assumes no 2 processes are accessing it simultaneously.
         * Currently this is guaranteed by mutex lock in ioctl.
         * If that changes, need to revisit this
         */
        int rc = 0, i, j;
        struct msm_isp_set_dual_hw_ms_cmd *dual_hw_ms_cmd = NULL;
        struct msm_vfe_src_info *src_info = NULL;
        unsigned long flags;

        if (!vfe_dev || !arg) {
                pr_err("%s: Error! Invalid input vfe_dev %pK arg %pK\n",
                        __func__, vfe_dev, arg);
                return -EINVAL;
        }

        dual_hw_ms_cmd = (struct msm_isp_set_dual_hw_ms_cmd *)arg;
        vfe_dev->common_data->ms_resource.dual_hw_type = DUAL_HW_MASTER_SLAVE;
        vfe_dev->vfe_ub_policy = MSM_WM_UB_EQUAL_SLICING;
        /* Record the role on the primary interface, if one was given. */
        if (dual_hw_ms_cmd->primary_intf < VFE_SRC_MAX) {
                ISP_DBG("%s: vfe %d primary_intf %d\n", __func__,
                        vfe_dev->pdev->id, dual_hw_ms_cmd->primary_intf);
                src_info = &vfe_dev->axi_data.
                        src_info[dual_hw_ms_cmd->primary_intf];
                src_info->dual_hw_ms_info.dual_hw_ms_type =
                        dual_hw_ms_cmd->dual_hw_ms_type;
        }

        /* No lock needed here since ioctl lock protects 2 session from race */
        if (src_info != NULL &&
                dual_hw_ms_cmd->dual_hw_ms_type == MS_TYPE_MASTER) {
                /* Master: bind directly to the single master sof_info slot. */
                src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
                ISP_DBG("%s: vfe %d Master\n", __func__, vfe_dev->pdev->id);

                src_info->dual_hw_ms_info.sof_info =
                        &vfe_dev->common_data->ms_resource.master_sof_info;
                vfe_dev->common_data->ms_resource.sof_delta_threshold =
                        dual_hw_ms_cmd->sof_delta_threshold;
        } else if (src_info != NULL) {
                /*
                 * Slave: reserve the first free slot in the shared slave
                 * mask under the common-data spinlock (this part IS racy
                 * across VFEs, hence the lock despite the comment above).
                 */
                spin_lock_irqsave(
                        &vfe_dev->common_data->common_dev_data_lock,
                        flags);
                src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
                ISP_DBG("%s: vfe %d Slave\n", __func__, vfe_dev->pdev->id);

                for (j = 0; j < MS_NUM_SLAVE_MAX; j++) {
                        if (vfe_dev->common_data->ms_resource.
                                reserved_slave_mask & (1 << j))
                                continue;

                        vfe_dev->common_data->ms_resource.reserved_slave_mask |=
                                (1 << j);
                        vfe_dev->common_data->ms_resource.num_slave++;
                        src_info->dual_hw_ms_info.sof_info =
                                &vfe_dev->common_data->ms_resource.
                                slave_sof_info[j];
                        src_info->dual_hw_ms_info.slave_id = j;
                        ISP_DBG("%s: Slave id %d\n", __func__, j);
                        break;
                }
                spin_unlock_irqrestore(
                        &vfe_dev->common_data->common_dev_data_lock,
                        flags);

                /* Loop ran to completion: every slave slot was taken. */
                if (j == MS_NUM_SLAVE_MAX) {
                        pr_err("%s: Error! Cannot find free aux resource\n",
                                __func__);
                        return -EBUSY;
                }
        }
        ISP_DBG("%s: vfe %d num_src %d\n", __func__, vfe_dev->pdev->id,
                dual_hw_ms_cmd->num_src);
        if (dual_hw_ms_cmd->num_src > VFE_SRC_MAX) {
                pr_err("%s: Error! Invalid num_src %d\n", __func__,
                        dual_hw_ms_cmd->num_src);
                return -EINVAL;
        }
        /* This for loop is for non-primary intf to be marked with Master/Slave
         * in order for frame id sync. But their timestamp is not saved.
         * So no sof_info resource is allocated
         */
        for (i = 0; i < dual_hw_ms_cmd->num_src; i++) {
                if (dual_hw_ms_cmd->input_src[i] >= VFE_SRC_MAX) {
                        pr_err("%s: Error! Invalid SRC param %d\n", __func__,
                                dual_hw_ms_cmd->input_src[i]);
                        return -EINVAL;
                }
                ISP_DBG("%s: vfe %d src %d type %d\n", __func__,
                        vfe_dev->pdev->id, dual_hw_ms_cmd->input_src[i],
                        dual_hw_ms_cmd->dual_hw_ms_type);
                src_info = &vfe_dev->axi_data.
                        src_info[dual_hw_ms_cmd->input_src[i]];
                src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
                src_info->dual_hw_ms_info.dual_hw_ms_type =
                        dual_hw_ms_cmd->dual_hw_ms_type;
        }

        return rc;
}
708
709 static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
710 {
711         int rc = 0;
712         uint32_t count = 0;
713         struct msm_vfe_cfg_cmd_list *proc_cmd =
714                 (struct msm_vfe_cfg_cmd_list *)arg;
715         struct msm_vfe_cfg_cmd_list cmd, cmd_next;
716
717         if (!vfe_dev || !arg) {
718                 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
719                         vfe_dev, arg);
720                 return -EINVAL;
721         }
722
723         rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
724         if (rc < 0)
725                 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
726
727         cmd = *proc_cmd;
728
729         while (cmd.next) {
730                 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
731                         pr_err("%s:%d failed: next size %u != expected %zu\n",
732                                 __func__, __LINE__, cmd.next_size,
733                                 sizeof(struct msm_vfe_cfg_cmd_list));
734                         break;
735                 }
736                 if (++count >= MAX_ISP_REG_LIST) {
737                         pr_err("%s:%d Error exceeding the max register count:%u\n",
738                                 __func__, __LINE__, count);
739                         rc = -EFAULT;
740                         break;
741                 }
742                 if (copy_from_user(&cmd_next, (void __user *)cmd.next,
743                         sizeof(struct msm_vfe_cfg_cmd_list))) {
744                         rc = -EFAULT;
745                         continue;
746                 }
747
748                 rc = msm_isp_proc_cmd(vfe_dev, &cmd_next.cfg_cmd);
749                 if (rc < 0)
750                         pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
751
752                 cmd = cmd_next;
753         }
754         return rc;
755 }
756
757 #ifdef CONFIG_COMPAT
/*
 * 32-bit ABI mirror of struct msm_vfe_cfg_cmd2: user pointers are carried
 * as compat_caddr_t so a 32-bit userspace can talk to a 64-bit kernel.
 */
struct msm_vfe_cfg_cmd2_32 {
        uint16_t num_cfg;
        uint16_t cmd_len;
        compat_caddr_t cfg_data;
        compat_caddr_t cfg_cmd;
};

/* 32-bit ABI mirror of struct msm_vfe_cfg_cmd_list (linked-list node). */
struct msm_vfe_cfg_cmd_list_32 {
        struct msm_vfe_cfg_cmd2_32   cfg_cmd;
        compat_caddr_t               next;
        uint32_t                     next_size;
};

/* Compat ioctl codes: same command slots, 32-bit payload layouts. */
#define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
        _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
#define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
        _IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
775
776 static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
777         struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
778 {
779         proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
780         proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
781         proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
782         proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
783 }
784
785 static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
786 {
787         int rc = 0;
788         uint32_t count = 0;
789         struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
790                 (struct msm_vfe_cfg_cmd_list_32 *)arg;
791         struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
792         struct msm_vfe_cfg_cmd2 current_cmd;
793
794         if (!vfe_dev || !arg) {
795                 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
796                         vfe_dev, arg);
797                 return -EINVAL;
798         }
799         msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
800         rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
801         if (rc < 0)
802                 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
803
804         cmd = *proc_cmd;
805
806         while (compat_ptr(cmd.next) != NULL) {
807                 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
808                         pr_err("%s:%d failed: next size %u != expected %zu\n",
809                                 __func__, __LINE__, cmd.next_size,
810                                 sizeof(struct msm_vfe_cfg_cmd_list));
811                         break;
812                 }
813                 if (++count >= MAX_ISP_REG_LIST) {
814                         pr_err("%s:%d Error exceeding the max register count:%u\n",
815                                 __func__, __LINE__, count);
816                         rc = -EFAULT;
817                         break;
818                 }
819                 if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
820                         sizeof(struct msm_vfe_cfg_cmd_list_32))) {
821                         rc = -EFAULT;
822                         continue;
823                 }
824
825                 msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
826                 rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
827                 if (rc < 0)
828                         pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
829
830                 cmd = cmd_next;
831         }
832         return rc;
833 }
834
/*
 * Dispatch a REG_LIST_CFG request to the compat or native list walker
 * depending on whether the calling task is 32-bit.
 */
static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
{
	if (is_compat_task())
		return msm_isp_proc_cmd_list_compat(vfe_dev, arg);

	return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
}
842 #else /* CONFIG_COMPAT */
/* Non-compat kernels: only the native list layout exists. */
static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
{
	return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
}
847 #endif /* CONFIG_COMPAT */
848
/*
 * msm_isp_ioctl_unlocked - main ISP subdev ioctl dispatcher.
 *
 * @sd: v4l2 subdev whose drvdata is the vfe_device
 * @cmd: VIDIOC_MSM_* / MSM_SD_* ioctl number
 * @arg: ioctl payload, already copied into kernel space by the v4l2 core
 *
 * Each case picks one of three locks (see comment below): realtime_mutex
 * for register-level work, buf_mgr->lock for buffer ops, core_mutex for
 * everything that can block on hardware start/stop.
 * Returns 0 on success or a negative errno.
 */
static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	long rc = 0;
	long rc2 = 0;
	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);

	if (!vfe_dev || !vfe_dev->vfe_base) {
		pr_err("%s:%d failed: invalid params %pK\n",
			__func__, __LINE__, vfe_dev);
		if (vfe_dev)
			pr_err("%s:%d failed %pK\n", __func__,
				__LINE__, vfe_dev->vfe_base);
		return -EINVAL;
	}

	/* use real time mutex for hard real-time ioctls such as
	 * buffer operations and register updates.
	 * Use core mutex for other ioctls that could take
	 * longer time to complete such as start/stop ISP streams
	 * which blocks until the hardware start/stop streaming
	 */
	ISP_DBG("%s: cmd: %d\n", __func__, _IOC_TYPE(cmd));
	switch (cmd) {
	case VIDIOC_MSM_VFE_REG_CFG: {
		mutex_lock(&vfe_dev->realtime_mutex);
		rc = msm_isp_proc_cmd(vfe_dev, arg);
		mutex_unlock(&vfe_dev->realtime_mutex);
		break;
	}
	case VIDIOC_MSM_VFE_REG_LIST_CFG: {
		mutex_lock(&vfe_dev->realtime_mutex);
		rc = msm_isp_proc_cmd_list(vfe_dev, arg);
		mutex_unlock(&vfe_dev->realtime_mutex);
		break;
	}
	/* Buffer-manager commands share one handler under buf_mgr->lock.
	 * NOTE(review): unlike RELEASE_BUFQ below, these do not check
	 * vfe_dev->buf_mgr for NULL first — confirm buf_mgr is always
	 * set by the time these ioctls can arrive.
	 */
	case VIDIOC_MSM_ISP_REQUEST_BUFQ:
		/* fallthrough */
	case VIDIOC_MSM_ISP_ENQUEUE_BUF:
		/* fallthrough */
	case VIDIOC_MSM_ISP_DEQUEUE_BUF:
		/* fallthrough */
	case VIDIOC_MSM_ISP_UNMAP_BUF: {
		mutex_lock(&vfe_dev->buf_mgr->lock);
		rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
		mutex_unlock(&vfe_dev->buf_mgr->lock);
		break;
	}
	case VIDIOC_MSM_ISP_RELEASE_BUFQ: {
		if (vfe_dev->buf_mgr == NULL) {
			pr_err("%s: buf mgr NULL! rc = -1\n", __func__);
			rc = -EINVAL;
			return rc;
		}
		mutex_lock(&vfe_dev->buf_mgr->lock);
		rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
		mutex_unlock(&vfe_dev->buf_mgr->lock);
		break;
	}
	case VIDIOC_MSM_ISP_REQUEST_STREAM:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_request_axi_stream(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_RELEASE_STREAM:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_release_axi_stream(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_CFG_STREAM:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_CFG_HW_STATE:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_update_stream_bandwidth(vfe_dev,
			*(enum msm_vfe_hw_state *)arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_AXI_HALT:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_axi_halt(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	/* Reset/restart are refused while an overflow condition has
	 * forced the hardware into HALT_ENFORCED state.
	 */
	case VIDIOC_MSM_ISP_AXI_RESET:
		mutex_lock(&vfe_dev->core_mutex);
		if (atomic_read(&vfe_dev->error_info.overflow_state)
			!= HALT_ENFORCED) {
			rc = msm_isp_stats_reset(vfe_dev);
			rc2 = msm_isp_axi_reset(vfe_dev, arg);
			if (!rc && rc2)
				rc = rc2;
		} else {
			pr_err_ratelimited("%s: no HW reset, halt enforced.\n",
				__func__);
		}
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_AXI_RESTART:
		mutex_lock(&vfe_dev->core_mutex);
		if (atomic_read(&vfe_dev->error_info.overflow_state)
			!= HALT_ENFORCED) {
			rc = msm_isp_stats_restart(vfe_dev);
			rc2 = msm_isp_axi_restart(vfe_dev, arg);
			if (!rc && rc2)
				rc = rc2;
		} else {
			pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
				__func__);
		}
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_INPUT_CFG:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_cfg_input(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_AHB_CLK_CFG:
		mutex_lock(&vfe_dev->core_mutex);
		if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
			rc = vfe_dev->hw_info->vfe_ops.core_ops.
					ahb_clk_cfg(vfe_dev, arg);
		else
			rc = -EOPNOTSUPP;
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_set_dual_HW_master_slave_mode(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_FETCH_ENG_START:
	case VIDIOC_MSM_ISP_MAP_BUF_START_FE:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_start_fetch_engine(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;

	case VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START:
	case VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_start_fetch_engine_multi_pass(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
		/* NOTE(review): issued without taking any mutex — confirm
		 * reg_update is safe to call concurrently with the other
		 * core_mutex-protected paths.
		 */
		if (arg) {
			enum msm_vfe_input_src frame_src =
				*((enum msm_vfe_input_src *)arg);
			vfe_dev->hw_info->vfe_ops.core_ops.
				reg_update(vfe_dev, frame_src);
		}
		break;
	case VIDIOC_MSM_ISP_SET_SRC_STATE:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_set_src_state(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_request_stats_stream(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_release_stats_stream(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_update_stats_stream(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_UPDATE_STREAM:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_update_axi_stream(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_SMMU_ATTACH:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_OPERATION_CFG:
		mutex_lock(&vfe_dev->core_mutex);
		msm_isp_operation_cfg(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_AXI_OUTPUT_CFG:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_axi_output_cfg(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case VIDIOC_MSM_ISP_CAMIF_CFG:
		mutex_lock(&vfe_dev->core_mutex);
		rc = msm_isp_camif_cfg(vfe_dev, arg);
		mutex_unlock(&vfe_dev->core_mutex);
		break;
	case MSM_SD_NOTIFY_FREEZE:
		/* Re-arm the per-input SOF debug print counters. */
		vfe_dev->isp_sof_debug = 0;
		vfe_dev->isp_raw0_debug = 0;
		vfe_dev->isp_raw1_debug = 0;
		vfe_dev->isp_raw2_debug = 0;
		break;
	case MSM_SD_UNNOTIFY_FREEZE:
		break;
	case MSM_SD_SHUTDOWN:
		/* Drain every remaining open reference before shutdown. */
		while (vfe_dev->vfe_open_cnt != 0)
			msm_isp_close_node(sd, NULL);
		break;

	default:
		pr_err_ratelimited("%s: Invalid ISP command %d\n", __func__,
				cmd);
		rc = -EINVAL;
	}
	return rc;
}
1072
1073
1074 #ifdef CONFIG_COMPAT
1075 static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
1076         unsigned int cmd, void *arg)
1077 {
1078         struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
1079         long rc = 0;
1080
1081         if (!vfe_dev || !vfe_dev->vfe_base) {
1082                 pr_err("%s:%d failed: invalid params %pK\n",
1083                         __func__, __LINE__, vfe_dev);
1084                 if (vfe_dev)
1085                         pr_err("%s:%d failed %pK\n", __func__,
1086                                 __LINE__, vfe_dev->vfe_base);
1087                 return -EINVAL;
1088         }
1089
1090         switch (cmd) {
1091         case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
1092                 struct msm_vfe_cfg_cmd2 proc_cmd;
1093
1094                 mutex_lock(&vfe_dev->realtime_mutex);
1095                 msm_isp_compat_to_proc_cmd(&proc_cmd,
1096                         (struct msm_vfe_cfg_cmd2_32 *) arg);
1097                 rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
1098                 mutex_unlock(&vfe_dev->realtime_mutex);
1099                 break;
1100         }
1101         case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
1102                 mutex_lock(&vfe_dev->realtime_mutex);
1103                 rc = msm_isp_proc_cmd_list(vfe_dev, arg);
1104                 mutex_unlock(&vfe_dev->realtime_mutex);
1105                 break;
1106         }
1107         default:
1108                 return msm_isp_ioctl_unlocked(sd, cmd, arg);
1109         }
1110
1111         return rc;
1112 }
1113
/* Public ioctl entry point (CONFIG_COMPAT builds): route through the
 * compat translator, which itself falls back to the native handler.
 */
long msm_isp_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	return msm_isp_ioctl_compat(sd, cmd, arg);
}
1119 #else /* CONFIG_COMPAT */
/* Public ioctl entry point (no CONFIG_COMPAT): native handler only. */
long msm_isp_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	return msm_isp_ioctl_unlocked(sd, cmd, arg);
}
1125 #endif /* CONFIG_COMPAT */
1126
1127 static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
1128         struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
1129         uint32_t *cfg_data, uint32_t cmd_len)
1130 {
1131         if (!vfe_dev || !reg_cfg_cmd) {
1132                 pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
1133                         __LINE__, vfe_dev, reg_cfg_cmd);
1134                 return -EINVAL;
1135         }
1136         if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
1137                 (!cfg_data || !cmd_len)) {
1138                 pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
1139                         __func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
1140                         cmd_len);
1141                 return -EINVAL;
1142         }
1143
1144         /* Validate input parameters */
1145         switch (reg_cfg_cmd->cmd_type) {
1146         case VFE_WRITE:
1147         case VFE_READ:
1148         case VFE_WRITE_MB: {
1149                 if ((reg_cfg_cmd->u.rw_info.reg_offset >
1150                         (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1151                         ((reg_cfg_cmd->u.rw_info.reg_offset +
1152                         reg_cfg_cmd->u.rw_info.len) >
1153                         vfe_dev->vfe_base_size) ||
1154                         (reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
1155                         pr_err_ratelimited("%s:%d regoffset %d len %d res %d\n",
1156                                 __func__, __LINE__,
1157                                 reg_cfg_cmd->u.rw_info.reg_offset,
1158                                 reg_cfg_cmd->u.rw_info.len,
1159                                 (uint32_t)vfe_dev->vfe_base_size);
1160                         return -EINVAL;
1161                 }
1162
1163                 if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
1164                         (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1165                         ((reg_cfg_cmd->u.rw_info.cmd_data_offset +
1166                         reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
1167                         pr_err_ratelimited("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
1168                                 __func__, __LINE__,
1169                                 reg_cfg_cmd->u.rw_info.cmd_data_offset,
1170                                 reg_cfg_cmd->u.rw_info.len, cmd_len);
1171                         return -EINVAL;
1172                 }
1173                 break;
1174         }
1175
1176         case VFE_WRITE_DMI_16BIT:
1177         case VFE_WRITE_DMI_32BIT:
1178         case VFE_WRITE_DMI_64BIT:
1179         case VFE_READ_DMI_16BIT:
1180         case VFE_READ_DMI_32BIT:
1181         case VFE_READ_DMI_64BIT: {
1182                 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
1183                         reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1184                         if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
1185                                 reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
1186                                 (reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
1187                                 reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
1188                                 (sizeof(uint32_t)))) {
1189                                 pr_err("%s:%d hi %d lo %d\n",
1190                                         __func__, __LINE__,
1191                                         reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1192                                         reg_cfg_cmd->u.dmi_info.hi_tbl_offset);
1193                                 return -EINVAL;
1194                         }
1195                         if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
1196                                 pr_err("%s:%d len %d\n",
1197                                         __func__, __LINE__,
1198                                         reg_cfg_cmd->u.dmi_info.len);
1199                                 return -EINVAL;
1200                         }
1201                         if (((UINT_MAX -
1202                                 reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
1203                                 (reg_cfg_cmd->u.dmi_info.len -
1204                                 sizeof(uint32_t))) ||
1205                                 ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
1206                                 reg_cfg_cmd->u.dmi_info.len -
1207                                 sizeof(uint32_t)) > cmd_len)) {
1208                                 pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
1209                                         __func__, __LINE__,
1210                                         reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1211                                         reg_cfg_cmd->u.dmi_info.len, cmd_len);
1212                                 return -EINVAL;
1213                         }
1214                 }
1215                 if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
1216                         (UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
1217                         ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
1218                         reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
1219                         pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
1220                                 __func__, __LINE__,
1221                                 reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
1222                                 reg_cfg_cmd->u.dmi_info.len, cmd_len);
1223                         return -EINVAL;
1224                 }
1225                 break;
1226         }
1227
1228         default:
1229                 break;
1230         }
1231
1232         switch (reg_cfg_cmd->cmd_type) {
1233         case VFE_WRITE: {
1234                 msm_camera_io_memcpy(vfe_dev->vfe_base +
1235                         reg_cfg_cmd->u.rw_info.reg_offset,
1236                         (void __iomem *)
1237                         (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
1238                         reg_cfg_cmd->u.rw_info.len);
1239                 break;
1240         }
1241         case VFE_WRITE_MB: {
1242                 msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
1243                         reg_cfg_cmd->u.rw_info.reg_offset,
1244                         (void __iomem *)
1245                         (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
1246                         reg_cfg_cmd->u.rw_info.len);
1247                 break;
1248         }
1249         case VFE_CFG_MASK: {
1250                 uint32_t temp;
1251                 bool grab_lock;
1252                 unsigned long flags;
1253
1254                 if ((UINT_MAX - sizeof(temp) <
1255                         reg_cfg_cmd->u.mask_info.reg_offset) ||
1256                         (vfe_dev->vfe_base_size <
1257                         reg_cfg_cmd->u.mask_info.reg_offset +
1258                         sizeof(temp)) ||
1259                         (reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
1260                         pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
1261                         return -EINVAL;
1262                 }
1263                 grab_lock = vfe_dev->hw_info->vfe_ops.core_ops.
1264                         is_module_cfg_lock_needed(reg_cfg_cmd->
1265                         u.mask_info.reg_offset);
1266                 if (grab_lock)
1267                         spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
1268                 temp = msm_camera_io_r(vfe_dev->vfe_base +
1269                         reg_cfg_cmd->u.mask_info.reg_offset);
1270
1271                 temp &= ~reg_cfg_cmd->u.mask_info.mask;
1272                 temp |= reg_cfg_cmd->u.mask_info.val;
1273                 msm_camera_io_w(temp, vfe_dev->vfe_base +
1274                         reg_cfg_cmd->u.mask_info.reg_offset);
1275                 if (grab_lock)
1276                         spin_unlock_irqrestore(&vfe_dev->shared_data_lock,
1277                                 flags);
1278                 break;
1279         }
1280         case VFE_WRITE_DMI_16BIT:
1281         case VFE_WRITE_DMI_32BIT:
1282         case VFE_WRITE_DMI_64BIT: {
1283                 int i;
1284                 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1285                 uint32_t hi_val, lo_val, lo_val1;
1286
1287                 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
1288                         hi_tbl_ptr = cfg_data +
1289                                 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1290                 }
1291                 lo_tbl_ptr = cfg_data +
1292                         reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
1293                 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
1294                         reg_cfg_cmd->u.dmi_info.len =
1295                                 reg_cfg_cmd->u.dmi_info.len / 2;
1296                 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1297                         lo_val = *lo_tbl_ptr++;
1298                         if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
1299                                 lo_val1 = lo_val & 0x0000FFFF;
1300                                 lo_val = (lo_val & 0xFFFF0000)>>16;
1301                                 msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
1302                                         vfe_dev->hw_info->dmi_reg_offset + 0x4);
1303                         } else if (reg_cfg_cmd->cmd_type ==
1304                                            VFE_WRITE_DMI_64BIT) {
1305                                 lo_tbl_ptr++;
1306                                 hi_val = *hi_tbl_ptr;
1307                                 hi_tbl_ptr = hi_tbl_ptr + 2;
1308                                 msm_camera_io_w(hi_val, vfe_dev->vfe_base +
1309                                         vfe_dev->hw_info->dmi_reg_offset);
1310                         }
1311                         msm_camera_io_w(lo_val, vfe_dev->vfe_base +
1312                                 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1313                 }
1314                 break;
1315         }
1316         case VFE_READ_DMI_16BIT:
1317         case VFE_READ_DMI_32BIT:
1318         case VFE_READ_DMI_64BIT: {
1319                 int i;
1320                 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1321                 uint32_t hi_val, lo_val, lo_val1;
1322
1323                 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1324                         hi_tbl_ptr = cfg_data +
1325                                 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1326                 }
1327
1328                 lo_tbl_ptr = cfg_data +
1329                         reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
1330
1331                 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
1332                         reg_cfg_cmd->u.dmi_info.len =
1333                                 reg_cfg_cmd->u.dmi_info.len / 2;
1334
1335                 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1336                         lo_val = msm_camera_io_r(vfe_dev->vfe_base +
1337                                         vfe_dev->hw_info->dmi_reg_offset + 0x4);
1338
1339                         if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
1340                                 lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
1341                                         vfe_dev->hw_info->dmi_reg_offset + 0x4);
1342                                 lo_val |= lo_val1 << 16;
1343                         }
1344                         *lo_tbl_ptr++ = lo_val;
1345                         if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1346                                 hi_val = msm_camera_io_r(vfe_dev->vfe_base +
1347                                         vfe_dev->hw_info->dmi_reg_offset);
1348                                 *hi_tbl_ptr = hi_val;
1349                                 hi_tbl_ptr += 2;
1350                                 lo_tbl_ptr++;
1351                         }
1352                 }
1353                 break;
1354         }
1355         case VFE_HW_UPDATE_LOCK: {
1356                 uint32_t update_id =
1357                         vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
1358                 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
1359                         || update_id == *cfg_data) {
1360                         pr_err("%s hw update lock failed acq %d, cur id %u, last id %u\n",
1361                                 __func__,
1362                                 *cfg_data,
1363                                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
1364                                 update_id);
1365                         return -EINVAL;
1366                 }
1367                 break;
1368         }
1369         case VFE_HW_UPDATE_UNLOCK: {
1370                 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
1371                         != *cfg_data) {
1372                         pr_err("hw update across frame boundary,begin id %u, end id %d\n",
1373                                 *cfg_data,
1374                                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
1375                 }
1376                 vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
1377                         vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
1378                 break;
1379         }
1380         case VFE_READ: {
1381                 int i;
1382                 uint32_t *data_ptr = cfg_data +
1383                         reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
1384                 for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
1385                         if ((data_ptr < cfg_data) ||
1386                                 (UINT_MAX / sizeof(*data_ptr) <
1387                                  (data_ptr - cfg_data)) ||
1388                                 (sizeof(*data_ptr) * (data_ptr - cfg_data) >=
1389                                  cmd_len))
1390                                 return -EINVAL;
1391                         *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
1392                                 reg_cfg_cmd->u.rw_info.reg_offset);
1393                         reg_cfg_cmd->u.rw_info.reg_offset += 4;
1394                 }
1395                 break;
1396         }
1397         case GET_MAX_CLK_RATE: {
1398                 int rc = 0;
1399                 unsigned long rate;
1400
1401                 if (cmd_len != sizeof(__u32)) {
1402                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1403                                 __func__, __LINE__, cmd_len,
1404                                 sizeof(__u32));
1405                         return -EINVAL;
1406                 }
1407                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_max_clk_rate(
1408                                                         vfe_dev, &rate);
1409                 if (rc < 0) {
1410                         pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
1411                         return -EINVAL;
1412                 }
1413
1414                 *(__u32 *)cfg_data = (__u32)rate;
1415
1416                 break;
1417         }
1418         case GET_CLK_RATES: {
1419                 int rc = 0;
1420                 struct msm_isp_clk_rates rates;
1421                 struct msm_isp_clk_rates *user_data =
1422                         (struct msm_isp_clk_rates *)cfg_data;
1423                 if (cmd_len != sizeof(struct msm_isp_clk_rates)) {
1424                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1425                                 __func__, __LINE__, cmd_len,
1426                                 sizeof(struct msm_isp_clk_rates));
1427                         return -EINVAL;
1428                 }
1429                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(
1430                                                         vfe_dev, &rates);
1431                 if (rc < 0) {
1432                         pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
1433                         return -EINVAL;
1434                 }
1435                 user_data->svs_rate = rates.svs_rate;
1436                 user_data->nominal_rate = rates.nominal_rate;
1437                 user_data->high_rate = rates.high_rate;
1438                 break;
1439         }
1440         case GET_ISP_ID: {
1441                 uint32_t *isp_id = NULL;
1442
1443                 if (cmd_len < sizeof(uint32_t)) {
1444                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1445                                 __func__, __LINE__, cmd_len,
1446                                 sizeof(uint32_t));
1447                         return -EINVAL;
1448                 }
1449
1450                 isp_id = (uint32_t *)cfg_data;
1451                 *isp_id = vfe_dev->pdev->id;
1452                 break;
1453         }
1454         case SET_WM_UB_SIZE:
1455                 break;
1456         case SET_UB_POLICY: {
1457
1458                 if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
1459                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1460                                 __func__, __LINE__, cmd_len,
1461                                 sizeof(vfe_dev->vfe_ub_policy));
1462                         return -EINVAL;
1463                 }
1464                 vfe_dev->vfe_ub_policy = *cfg_data;
1465                 break;
1466         }
1467         }
1468         return 0;
1469 }
1470
1471 int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
1472 {
1473         int rc = 0, i;
1474         struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
1475         struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
1476         uint32_t *cfg_data = NULL;
1477
1478         if (!proc_cmd->num_cfg
1479                 || proc_cmd->num_cfg > MAX_ISP_CMD_NUM) {
1480                 pr_err("%s: num_cfg outside allowed range\n",
1481                         __func__);
1482                 return -EINVAL;
1483         }
1484
1485         reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
1486                 proc_cmd->num_cfg, GFP_KERNEL);
1487         if (!reg_cfg_cmd) {
1488                 rc = -ENOMEM;
1489                 goto reg_cfg_failed;
1490         }
1491
1492         if (copy_from_user(reg_cfg_cmd,
1493                 (void __user *)(proc_cmd->cfg_cmd),
1494                 sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
1495                 rc = -EFAULT;
1496                 goto copy_cmd_failed;
1497         }
1498
1499         if (proc_cmd->cmd_len > 0) {
1500                 if (proc_cmd->cmd_len > MAX_ISP_CMD_LEN) {
1501                         pr_err("%s: cmd_len exceed max allowed\n", __func__);
1502                         rc = -EINVAL;
1503                         goto cfg_data_failed;
1504                 }
1505
1506                 cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
1507                 if (!cfg_data) {
1508                         rc = -ENOMEM;
1509                         goto cfg_data_failed;
1510                 }
1511
1512                 if (copy_from_user(cfg_data,
1513                         (void __user *)(proc_cmd->cfg_data),
1514                         proc_cmd->cmd_len)) {
1515                         rc = -EFAULT;
1516                         goto copy_cmd_failed;
1517                 }
1518         }
1519
1520         for (i = 0; i < proc_cmd->num_cfg; i++)
1521                 rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
1522                         cfg_data, proc_cmd->cmd_len);
1523
1524         if (copy_to_user(proc_cmd->cfg_data,
1525                         cfg_data, proc_cmd->cmd_len)) {
1526                 rc = -EFAULT;
1527                 goto copy_cmd_failed;
1528         }
1529
1530 copy_cmd_failed:
1531         kfree(cfg_data);
1532 cfg_data_failed:
1533         kfree(reg_cfg_cmd);
1534 reg_cfg_failed:
1535         return rc;
1536 }
1537
1538 int msm_isp_send_event(struct vfe_device *vfe_dev,
1539         uint32_t event_type,
1540         struct msm_isp_event_data *event_data)
1541 {
1542         struct v4l2_event isp_event;
1543
1544         memset(&isp_event, 0, sizeof(struct v4l2_event));
1545         isp_event.id = 0;
1546         isp_event.type = event_type;
1547
1548         memcpy(&isp_event.u.data[0], event_data,
1549                 sizeof(struct msm_isp_event_data));
1550         v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
1551         return 0;
1552 }
1553
/*
 * CAL_WORD - bus words needed for @width pixels packed at a ratio of
 * M words per N pixels, rounded up: ceil(width * M / N).
 *
 * All operands are fully parenthesized so that expression arguments
 * (e.g. CAL_WORD(a + b, ...)) expand correctly.
 */
#define CAL_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
1555
/*
 * msm_isp_cal_word_per_line - Convert a line width in pixels into the
 * number of AXI bus words required for one line of the given format.
 *
 * CAL_WORD(pixels, M, N) yields ceil(pixels * M / N), i.e. M words per
 * N pixels for the format's packing ratio.
 *
 * Returns the word count, or -1 for an unrecognized format.
 */
int msm_isp_cal_word_per_line(uint32_t output_format,
	uint32_t pixel_per_line)
{
	int val = -1;

	switch (output_format) {
	/* 8bpp raw/meta: 1 word per 8 pixels */
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_META:
		val = CAL_WORD(pixel_per_line, 1, 8);
		break;
	/* 10-bit MIPI-packed raw/meta: 5 words per 32 pixels */
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_Y10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_META10:
		val = CAL_WORD(pixel_per_line, 5, 32);
		break;
	/* 12-bit MIPI-packed raw: 3 words per 16 pixels */
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_Y12:
		val = CAL_WORD(pixel_per_line, 3, 16);
		break;
	/* 14-bit MIPI-packed raw: 7 words per 32 pixels */
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
		val = CAL_WORD(pixel_per_line, 7, 32);
		break;
	/* QCOM-packed 10-bit raw: 1 word per 6 pixels */
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
		val = CAL_WORD(pixel_per_line, 1, 6);
		break;
	/* QCOM-packed 12-bit raw: 1 word per 5 pixels */
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
		val = CAL_WORD(pixel_per_line, 1, 5);
		break;
	/* QCOM-packed 14-bit raw: 1 word per 4 pixels */
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		val = CAL_WORD(pixel_per_line, 1, 4);
		break;
	/* Planar/semi-planar YUV and grey: 1 word per 8 pixels per plane */
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV14:
	case V4L2_PIX_FMT_NV41:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_GREY:
		val = CAL_WORD(pixel_per_line, 1, 8);
		break;
	/* Interleaved YUV 4:2:2 (2 bytes/pixel): 2 words per 8 pixels */
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
		val = CAL_WORD(pixel_per_line, 2, 8);
	break;
	/* Plain16 10-bit raw (16 bits/pixel): 1 word per 4 pixels */
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
		val = CAL_WORD(pixel_per_line, 1, 4);
	break;
	case V4L2_PIX_FMT_NV24:
	case V4L2_PIX_FMT_NV42:
		val = CAL_WORD(pixel_per_line, 1, 8);
	break;
		/* TODO: add more image formats */
	default:
		/* Unknown format: log it and fall through to return -1. */
		msm_isp_print_fourcc_error(__func__, output_format);
		break;
	}
	return val;
}
1653
/*
 * msm_isp_get_pack_format - Map a V4L2 fourcc to the write-master
 * packing mode the VFE must be programmed with.
 *
 * Returns MIPI for MIPI-packed raw formats, QCOM for QCOM-packed
 * ("Q*") formats, PLAIN16 for the 16-bit-per-sample "P16*" formats,
 * or -EINVAL for an unrecognized format.
 */
enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
{
	switch (output_format) {
	/* MIPI CSI packed raw bayer / grey formats */
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
	case V4L2_PIX_FMT_GREY:
	case V4L2_PIX_FMT_Y10:
	case V4L2_PIX_FMT_Y12:
		return MIPI;
	/* Qualcomm proprietary packed raw formats */
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		return QCOM;
	/* 10-bit raw stored one sample per 16-bit word */
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
		return PLAIN16;
	default:
		msm_isp_print_fourcc_error(__func__, output_format);
		break;
	}
	return -EINVAL;
}
1713
/*
 * msm_isp_get_bit_per_pixel - Return the bits-per-pixel of a V4L2
 * fourcc as used for VFE bandwidth/size calculations, or -EINVAL for
 * an unrecognized format.
 *
 * Note: for multi-planar YUV formats the value here is the per-plane
 * sample depth used by this driver's math, not the average bpp of the
 * whole image.
 */
int msm_isp_get_bit_per_pixel(uint32_t output_format)
{
	switch (output_format) {
	case V4L2_PIX_FMT_Y4:
		return 4;
	case V4L2_PIX_FMT_Y6:
		return 6;
	/* 8-bit raw, meta, and 8-bit-per-sample YUV variants */
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_META:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV14:
	case V4L2_PIX_FMT_NV41:
	case V4L2_PIX_FMT_YVU410:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YYUV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_YUV411P:
	case V4L2_PIX_FMT_Y41P:
	case V4L2_PIX_FMT_YUV444:
	case V4L2_PIX_FMT_YUV555:
	case V4L2_PIX_FMT_YUV565:
	case V4L2_PIX_FMT_YUV32:
	case V4L2_PIX_FMT_YUV410:
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_GREY:
	case V4L2_PIX_FMT_PAL8:
	case V4L2_PIX_FMT_UV8:
	case MSM_V4L2_PIX_FMT_META:
		return 8;
	/* 10-bit raw (MIPI, DPCM, QCOM and plain16 packings) and meta */
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
	case V4L2_PIX_FMT_Y10:
	case V4L2_PIX_FMT_Y10BPACK:
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
	case V4L2_PIX_FMT_META10:
	case MSM_V4L2_PIX_FMT_META10:
		return 10;
	/* 12-bit raw */
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
	case V4L2_PIX_FMT_Y12:
		return 12;
	/* 14-bit raw */
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		return 14;
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_Y16:
		return 16;
	case V4L2_PIX_FMT_NV24:
	case V4L2_PIX_FMT_NV42:
		return 24;
		/* TODO: add more image formats */
	default:
		/*
		 * NOTE(review): msm_isp_print_fourcc_error() already logs
		 * the bad format; the pr_err below is redundant.
		 */
		msm_isp_print_fourcc_error(__func__, output_format);
		pr_err("%s: Invalid output format %x\n",
			__func__, output_format);
		return -EINVAL;
	}
}
1815
1816 void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
1817 {
1818         struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1819
1820         error_info->info_dump_frame_count++;
1821 }
1822
1823
1824 static int msm_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
1825 {
1826         int rc = vfe_dev->buf_mgr->pagefault_debug_disable;
1827
1828         pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %pK\n", __func__,
1829                 __LINE__,  vfe_dev->pdev->id, vfe_dev);
1830
1831         msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);
1832
1833         if (vfe_dev->buf_mgr->pagefault_debug_disable == 0) {
1834                 vfe_dev->buf_mgr->pagefault_debug_disable = 1;
1835                 vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
1836                         vfe_dev->page_fault_addr);
1837                 msm_isp_print_ping_pong_address(vfe_dev,
1838                         vfe_dev->page_fault_addr);
1839                 vfe_dev->hw_info->vfe_ops.axi_ops.
1840                         read_wm_ping_pong_addr(vfe_dev);
1841         }
1842         return rc;
1843 }
1844
1845 void msm_isp_process_error_info(struct vfe_device *vfe_dev)
1846 {
1847         struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1848
1849         if (error_info->error_count == 1 ||
1850                 !(error_info->info_dump_frame_count % 100)) {
1851                 vfe_dev->hw_info->vfe_ops.core_ops.
1852                         process_error_status(vfe_dev);
1853                 error_info->error_mask0 = 0;
1854                 error_info->error_mask1 = 0;
1855                 error_info->camif_status = 0;
1856                 error_info->violation_status = 0;
1857         }
1858 }
1859
1860 static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
1861         uint32_t error_mask0, uint32_t error_mask1)
1862 {
1863         vfe_dev->error_info.error_mask0 |= error_mask0;
1864         vfe_dev->error_info.error_mask1 |= error_mask1;
1865         vfe_dev->error_info.error_count++;
1866 }
1867
1868 void msm_isp_process_overflow_irq(
1869         struct vfe_device *vfe_dev,
1870         uint32_t *irq_status0, uint32_t *irq_status1,
1871         uint32_t force_overflow)
1872 {
1873         uint32_t overflow_mask;
1874
1875         /* if there are no active streams - do not start recovery */
1876         if (!vfe_dev->axi_data.num_active_stream)
1877                 return;
1878
1879         /* Mask out all other irqs if recovery is started */
1880         if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
1881                 uint32_t halt_restart_mask0, halt_restart_mask1;
1882
1883                 vfe_dev->hw_info->vfe_ops.core_ops.
1884                 get_halt_restart_mask(&halt_restart_mask0,
1885                         &halt_restart_mask1);
1886                 *irq_status0 &= halt_restart_mask0;
1887                 *irq_status1 &= halt_restart_mask1;
1888
1889                 return;
1890         }
1891
1892         /* Check if any overflow bit is set */
1893         vfe_dev->hw_info->vfe_ops.core_ops.
1894                 get_overflow_mask(&overflow_mask);
1895         if (!force_overflow)
1896                 overflow_mask &= *irq_status1;
1897
1898         if (overflow_mask) {
1899                 struct msm_isp_event_data error_event;
1900
1901                 if (vfe_dev->reset_pending == 1) {
1902                         pr_err("%s:%d failed: overflow %x during reset\n",
1903                                 __func__, __LINE__, overflow_mask);
1904                         /* Clear overflow bits since reset is pending */
1905                         *irq_status1 &= ~overflow_mask;
1906                         return;
1907                 }
1908
1909                 ISP_DBG("%s: VFE%d Bus overflow detected: start recovery!\n",
1910                         __func__, vfe_dev->pdev->id);
1911
1912
1913                 /* maks off irq for current vfe */
1914                 atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
1915                         NO_OVERFLOW, OVERFLOW_DETECTED);
1916                 vfe_dev->recovery_irq0_mask = vfe_dev->irq0_mask;
1917                 vfe_dev->recovery_irq1_mask = vfe_dev->irq1_mask;
1918
1919                 vfe_dev->hw_info->vfe_ops.core_ops.
1920                         set_halt_restart_mask(vfe_dev);
1921
1922                 /* mask off other vfe if dual vfe is used */
1923                 if (vfe_dev->is_split) {
1924                         uint32_t other_vfe_id;
1925                         struct vfe_device *other_vfe_dev;
1926
1927                         other_vfe_id = (vfe_dev->pdev->id == ISP_VFE0) ?
1928                                 ISP_VFE1 : ISP_VFE0;
1929                         other_vfe_dev = vfe_dev->common_data->
1930                                 dual_vfe_res->vfe_dev[other_vfe_id];
1931                         if (other_vfe_dev) {
1932                                 other_vfe_dev->recovery_irq0_mask =
1933                                         other_vfe_dev->irq0_mask;
1934                                 other_vfe_dev->recovery_irq1_mask =
1935                                         other_vfe_dev->irq1_mask;
1936                         }
1937
1938                         atomic_cmpxchg(&(vfe_dev->common_data->dual_vfe_res->
1939                                 vfe_dev[other_vfe_id]->
1940                                 error_info.overflow_state),
1941                                 NO_OVERFLOW, OVERFLOW_DETECTED);
1942
1943                         vfe_dev->hw_info->vfe_ops.core_ops.
1944                                 set_halt_restart_mask(vfe_dev->common_data->
1945                                 dual_vfe_res->vfe_dev[other_vfe_id]);
1946                 }
1947
1948                 /* reset irq status so skip further process */
1949                 *irq_status0 = 0;
1950                 *irq_status1 = 0;
1951
1952                 /* send overflow event as needed */
1953                 if (atomic_read(&vfe_dev->error_info.overflow_state)
1954                         != HALT_ENFORCED) {
1955                         memset(&error_event, 0, sizeof(error_event));
1956                         error_event.frame_id =
1957                                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
1958                         error_event.u.error_info.err_type =
1959                                 ISP_ERROR_BUS_OVERFLOW;
1960                         msm_isp_send_event(vfe_dev,
1961                                 ISP_EVENT_ERROR, &error_event);
1962                 }
1963         }
1964 }
1965
1966 void msm_isp_reset_burst_count_and_frame_drop(
1967         struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
1968 {
1969         if ((stream_info->state != ACTIVE) ||
1970                 (stream_info->stream_type != BURST_STREAM)) {
1971                 return;
1972         }
1973         if (stream_info->num_burst_capture != 0)
1974                 msm_isp_reset_framedrop(vfe_dev, stream_info);
1975 }
1976
/*
 * msm_isp_enqueue_tasklet_cmd - Top-half helper: snapshot the IRQ
 * status words and ping/pong status into the per-VFE ring of tasklet
 * commands and schedule the bottom half.
 *
 * If the ring slot at taskletq_idx is still marked in use, the queue
 * has overflowed: the entry is unlinked and its slot reused without
 * bumping irq_cnt, so the oldest unprocessed IRQ snapshot is dropped.
 */
static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1,
	uint32_t ping_pong_status)
{
	unsigned long flags;
	struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;

	spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
	queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
	if (queue_cmd->cmd_used) {
		ISP_DBG("%s: Tasklet queue overflow: %d\n",
			__func__, vfe_dev->pdev->id);
		/* Reuse the slot: drop the stale entry from the work list. */
		list_del(&queue_cmd->list);
	} else {
		atomic_add(1, &vfe_dev->irq_cnt);
	}
	queue_cmd->vfeInterruptStatus0 = irq_status0;
	queue_cmd->vfeInterruptStatus1 = irq_status1;
	queue_cmd->vfePingPongStatus = ping_pong_status;
	msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
	queue_cmd->cmd_used = 1;
	vfe_dev->taskletq_idx = (vfe_dev->taskletq_idx + 1) %
		MSM_VFE_TASKLETQ_SIZE;
	list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
	spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
	tasklet_schedule(&vfe_dev->vfe_tasklet);
}
2004
/*
 * msm_isp_process_irq - VFE hard-IRQ handler (top half).
 *
 * Reads and clears the IRQ status, runs overflow detection, splits
 * error bits from normal bits, records a dual-VFE debug snapshot when
 * the peer VFE is open, and defers all real processing to the tasklet
 * via msm_isp_enqueue_tasklet_cmd().
 *
 * Always returns IRQ_HANDLED.
 */
irqreturn_t msm_isp_process_irq(int irq_num, void *data)
{
	struct vfe_device *vfe_dev = (struct vfe_device *) data;
	uint32_t irq_status0, irq_status1, ping_pong_status;
	uint32_t error_mask0, error_mask1;

	vfe_dev->hw_info->vfe_ops.irq_ops.
		read_irq_status_and_clear(vfe_dev, &irq_status0, &irq_status1);

	/* Spurious interrupt: nothing latched in either status register. */
	if ((irq_status0 == 0) && (irq_status1 == 0)) {
		pr_err("%s:VFE%d irq_status0 & 1 are both 0\n",
			__func__, vfe_dev->pdev->id);
		return IRQ_HANDLED;
	}

	ping_pong_status = vfe_dev->hw_info->vfe_ops.axi_ops.
		get_pingpong_status(vfe_dev);
	if (vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq) {
		vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq(vfe_dev,
			irq_status0);
	}
	/* May clear/replace the status words if recovery starts. */
	msm_isp_process_overflow_irq(vfe_dev,
		&irq_status0, &irq_status1, 0);

	/* Split error bits out of the normal status and latch them. */
	vfe_dev->hw_info->vfe_ops.core_ops.
		get_error_mask(&error_mask0, &error_mask1);
	error_mask0 &= irq_status0;
	error_mask1 &= irq_status1;
	irq_status0 &= ~error_mask0;
	irq_status1 &= ~error_mask1;
	if ((error_mask0 != 0) || (error_mask1 != 0))
		msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);

	/*
	 * Only errors remained and this is not the first error: skip the
	 * tasklet (the first error alone still goes down for reporting).
	 */
	if ((irq_status0 == 0) && (irq_status1 == 0) &&
		(!(((error_mask0 != 0) || (error_mask1 != 0)) &&
		 vfe_dev->error_info.error_count == 1))) {
		ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
		return IRQ_HANDLED;
	}
	/* Dual-VFE debug: snapshot both VFEs' status into the dump ring. */
	dump_data.vfe_dev = (struct vfe_device *) data;
	if (vfe_dev->is_split &&
		(vfe_dev->common_data->dual_vfe_res->vfe_dev[
			!vfe_dev->pdev->id]) &&
		(vfe_dev->common_data->dual_vfe_res->vfe_dev[
			!vfe_dev->pdev->id]->vfe_open_cnt)) {
		spin_lock(&dump_irq_lock);
		dump_data.arr[dump_data.first].current_vfe_irq.
			vfe_id = vfe_dev->pdev->id;
		dump_data.arr[dump_data.first].current_vfe_irq.
			irq_status0 = irq_status0;
		dump_data.arr[dump_data.first].current_vfe_irq.
			irq_status1 = irq_status1;
		dump_data.arr[dump_data.first].current_vfe_irq.
			ping_pong_status = ping_pong_status;

		dump_data.arr[dump_data.first].other_vfe.
			vfe_id = (!vfe_dev->pdev->id);
		vfe_dev->hw_info->vfe_ops.irq_ops.
			read_irq_status(
			vfe_dev->common_data->dual_vfe_res->vfe_dev[
			!vfe_dev->pdev->id],
			&dump_data.arr[dump_data.first].other_vfe.irq_status0,
			&dump_data.arr[dump_data.first].other_vfe.irq_status1);
			dump_data.arr[dump_data.first].other_vfe.
		ping_pong_status =
			vfe_dev->hw_info->vfe_ops.axi_ops.
				get_pingpong_status(
				vfe_dev->common_data->dual_vfe_res->vfe_dev[
					!vfe_dev->pdev->id]);
		msm_isp_get_timestamp(&dump_data.arr[dump_data.first].
			other_vfe.ts, vfe_dev);
		dump_data.first =
			(dump_data.first + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
		dump_data.fill_count++;
		spin_unlock(&dump_irq_lock);
	}
	msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1,
					ping_pong_status);

	return IRQ_HANDLED;
}
2086
2087
2088 void msm_isp_do_tasklet(unsigned long data)
2089 {
2090         unsigned long flags;
2091         struct vfe_device *vfe_dev = (struct vfe_device *) data;
2092         struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
2093         struct msm_vfe_tasklet_queue_cmd *queue_cmd;
2094         struct msm_isp_timestamp ts;
2095         uint32_t irq_status0, irq_status1, pingpong_status;
2096
2097         if (vfe_dev->vfe_base == NULL || vfe_dev->vfe_open_cnt == 0) {
2098                 ISP_DBG("%s: VFE%d open cnt = %d, device closed(base = %pK)\n",
2099                         __func__, vfe_dev->pdev->id, vfe_dev->vfe_open_cnt,
2100                         vfe_dev->vfe_base);
2101                 return;
2102         }
2103
2104         while (atomic_read(&vfe_dev->irq_cnt)) {
2105                 spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
2106                 queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
2107                 struct msm_vfe_tasklet_queue_cmd, list);
2108
2109                 if (!queue_cmd) {
2110                         atomic_set(&vfe_dev->irq_cnt, 0);
2111                         spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
2112                         return;
2113                 }
2114                 atomic_sub(1, &vfe_dev->irq_cnt);
2115                 list_del(&queue_cmd->list);
2116                 queue_cmd->cmd_used = 0;
2117                 irq_status0 = queue_cmd->vfeInterruptStatus0;
2118                 irq_status1 = queue_cmd->vfeInterruptStatus1;
2119                 pingpong_status = queue_cmd->vfePingPongStatus;
2120                 ts = queue_cmd->ts;
2121                 spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
2122                 ISP_DBG("%s: vfe_id %d status0: 0x%x status1: 0x%x\n",
2123                         __func__, vfe_dev->pdev->id, irq_status0, irq_status1);
2124                 if (vfe_dev->is_split) {
2125                         spin_lock(&dump_tasklet_lock);
2126                         tasklet_data.arr[tasklet_data.first].
2127                         current_vfe_irq.vfe_id = vfe_dev->pdev->id;
2128                         tasklet_data.arr[tasklet_data.first].
2129                         current_vfe_irq.core = smp_processor_id();
2130                         tasklet_data.arr[tasklet_data.first].
2131                         current_vfe_irq.irq_status0 = irq_status0;
2132                         tasklet_data.arr[tasklet_data.first].
2133                         current_vfe_irq.irq_status1 = irq_status1;
2134                         tasklet_data.arr[tasklet_data.first].
2135                         current_vfe_irq.ping_pong_status = pingpong_status;
2136                         tasklet_data.arr[tasklet_data.first].
2137                         current_vfe_irq.ts = ts;
2138                         tasklet_data.first =
2139                         (tasklet_data.first + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
2140                         spin_unlock(&dump_tasklet_lock);
2141                 }
2142                 irq_ops->process_reset_irq(vfe_dev,
2143                         irq_status0, irq_status1);
2144                 irq_ops->process_halt_irq(vfe_dev,
2145                         irq_status0, irq_status1);
2146                 if (atomic_read(&vfe_dev->error_info.overflow_state)
2147                         != NO_OVERFLOW) {
2148                         ISP_DBG("%s: Recovery in processing, Ignore IRQs!!!\n",
2149                                 __func__);
2150                         continue;
2151                 }
2152                 msm_isp_process_error_info(vfe_dev);
2153                 irq_ops->process_stats_irq(vfe_dev,
2154                         irq_status0, irq_status1,
2155                         pingpong_status, &ts);
2156                 irq_ops->process_axi_irq(vfe_dev,
2157                         irq_status0, irq_status1,
2158                         pingpong_status, &ts);
2159                 irq_ops->process_camif_irq(vfe_dev,
2160                         irq_status0, irq_status1, &ts);
2161                 irq_ops->process_reg_update(vfe_dev,
2162                         irq_status0, irq_status1, &ts);
2163                 irq_ops->process_sof_irq(vfe_dev,
2164                         irq_status0, irq_status1, &ts);
2165                 irq_ops->process_epoch_irq(vfe_dev,
2166                         irq_status0, irq_status1, &ts);
2167         }
2168 }
2169
2170 int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
2171 {
2172         struct msm_vfe_axi_src_state *src_state = arg;
2173
2174         if (src_state->input_src >= VFE_SRC_MAX)
2175                 return -EINVAL;
2176         vfe_dev->axi_data.src_info[src_state->input_src].active =
2177         src_state->src_active;
2178         vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
2179         src_state->src_frame_id;
2180         return 0;
2181 }
2182
2183 static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
2184         struct device *dev, unsigned long iova, int flags, void *token)
2185 {
2186         struct vfe_device *vfe_dev = NULL;
2187
2188         if (token) {
2189                 vfe_dev = (struct vfe_device *)token;
2190                 vfe_dev->page_fault_addr = iova;
2191                 if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops ||
2192                         !vfe_dev->axi_data.num_active_stream) {
2193                         pr_err("%s:%d buf_mgr %pK active strms %d\n", __func__,
2194                                 __LINE__, vfe_dev->buf_mgr,
2195                                 vfe_dev->axi_data.num_active_stream);
2196                         goto end;
2197                 }
2198
2199                 mutex_lock(&vfe_dev->core_mutex);
2200                 if (vfe_dev->vfe_open_cnt > 0) {
2201                         atomic_set(&vfe_dev->error_info.overflow_state,
2202                                 HALT_ENFORCED);
2203                         msm_isp_process_iommu_page_fault(vfe_dev);
2204                 } else {
2205                         pr_err("%s: no handling, vfe open cnt = %d\n",
2206                                 __func__, vfe_dev->vfe_open_cnt);
2207                 }
2208                 mutex_unlock(&vfe_dev->core_mutex);
2209         } else {
2210                 ISP_DBG("%s:%d] no token received: %pK\n",
2211                         __func__, __LINE__, token);
2212                 goto end;
2213         }
2214 end:
2215         return;
2216 }
2217
/*
 * msm_isp_open_node - v4l2 subdev open handler for a VFE node.
 * @sd: subdev carrying the vfe_device as drvdata.
 * @fh: subdev file handle (unused).
 *
 * First open powers and resets the hardware, initialises the buffer
 * manager and all shared state, registers the SMMU page-fault handler,
 * then immediately drops the AHB vote, clocks and regulators so the idle
 * device can enter XO shutdown until streaming starts.  Subsequent opens
 * only bump the reference count.
 *
 * Returns 0 on success, -EINVAL on missing probe data or reset timeout,
 * -EBUSY if hardware init fails.
 */
int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
	long rc = 0;
	enum cam_ahb_clk_client id;

	ISP_DBG("%s open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);

	if (vfe_dev->common_data == NULL ||
		vfe_dev->common_data->dual_vfe_res == NULL) {
		pr_err("%s: Error in probe. No common_data or dual vfe res\n",
			__func__);
		return -EINVAL;
	}

	/* Opening VFE0 clears the dual-VFE epoch sync bookkeeping. */
	if (vfe_dev->pdev->id == ISP_VFE0)
		vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;

	mutex_lock(&vfe_dev->realtime_mutex);
	mutex_lock(&vfe_dev->core_mutex);

	/* Not the first opener: hardware is already up, just count. */
	if (vfe_dev->vfe_open_cnt++) {
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return 0;
	}

	/* Reset per-session debug/state flags before touching hardware. */
	vfe_dev->reset_pending = 0;
	vfe_dev->isp_sof_debug = 0;
	vfe_dev->isp_raw0_debug = 0;
	vfe_dev->isp_raw1_debug = 0;
	vfe_dev->isp_raw2_debug = 0;

	if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
		pr_err("%s: init hardware failed\n", __func__);
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EBUSY;
	}

	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
	atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);

	vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);

	vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
	ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
	/* reset_hw() returns <= 0 on timeout; unwind init_hw() on failure. */
	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
	if (rc <= 0) {
		pr_err("%s: reset timeout\n", __func__);
		vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EINVAL;
	}

	vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);

	vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr,
		"msm_isp");

	/* Start every shared-state structure from a clean slate. */
	memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
	memset(&vfe_dev->stats_data, 0,
		sizeof(struct msm_vfe_stats_shared_data));
	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
	memset(&vfe_dev->fetch_engine_info, 0,
		sizeof(vfe_dev->fetch_engine_info));
	vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
	vfe_dev->axi_data.enable_frameid_recovery = 0;
	vfe_dev->taskletq_idx = 0;
	vfe_dev->vt_enable = 0;
	vfe_dev->reg_update_requested = 0;
	/* Register page fault handler */
	vfe_dev->buf_mgr->pagefault_debug_disable = 0;
	cam_smmu_reg_client_page_fault_handler(
			vfe_dev->buf_mgr->iommu_hdl,
			msm_vfe_iommu_fault_handler, vfe_dev);

	/* Disable vfe clks and allow device to go XO shutdown mode */
	if (vfe_dev->pdev->id == 0)
		id = CAM_AHB_CLIENT_VFE0;
	else
		id = CAM_AHB_CLIENT_VFE1;
	if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
		pr_err("%s: failed to remove vote for AHB\n", __func__);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);

	mutex_unlock(&vfe_dev->core_mutex);
	mutex_unlock(&vfe_dev->realtime_mutex);
	return 0;
}
2312
#ifdef CONFIG_MSM_AVTIMER
/* Tell the AVCS core to stop vetoing power collapse of the AV timer —
 * presumably the argument 0 clears the veto taken while video
 * timestamping (vt_enable) was active; confirm against the avcs API.
 */
static void msm_isp_end_avtimer(void)
{
	avcs_core_disable_power_collapse(0);
}
#else
/* Stub for kernels built without CONFIG_MSM_AVTIMER; reaching it means
 * vt_enable was set on a build that cannot support it.
 */
static void msm_isp_end_avtimer(void)
{
	pr_err("AV Timer is not supported\n");
}
#endif
2324
/*
 * msm_isp_close_node - v4l2 subdev close handler for a VFE node.
 * @sd: subdev carrying the vfe_device as drvdata.
 * @fh: subdev file handle (unused).
 *
 * Wakes the hardware out of XO shutdown (AHB vote, clocks, regulators),
 * and on the last close: unregisters the SMMU fault handler, halts AXI,
 * disables CAMIF, resets the core, parks scratch buffers in every write
 * master, then releases the hardware and buffer manager.  Earlier closes
 * only drop the reference count.
 *
 * Returns 0 on success, -EINVAL if called with no matching open.
 */
int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	long rc = 0;
	int wm;
	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
	enum cam_ahb_clk_client id;

	ISP_DBG("%s E open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
	mutex_lock(&vfe_dev->realtime_mutex);
	mutex_lock(&vfe_dev->core_mutex);

	/* Enable vfe clks to wake up from XO shutdown mode */
	if (vfe_dev->pdev->id == 0)
		id = CAM_AHB_CLIENT_VFE0;
	else
		id = CAM_AHB_CLIENT_VFE1;
	if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE) < 0)
		pr_err("%s: failed to vote for AHB\n", __func__);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 1);
	vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 1);

	/* Close without a matching open: nothing to tear down. */
	if (!vfe_dev->vfe_open_cnt) {
		pr_err("%s invalid state open cnt %d\n", __func__,
			vfe_dev->vfe_open_cnt);
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return -EINVAL;
	}

	/* Other users remain: just drop our reference. */
	if (vfe_dev->vfe_open_cnt > 1) {
		vfe_dev->vfe_open_cnt--;
		mutex_unlock(&vfe_dev->core_mutex);
		mutex_unlock(&vfe_dev->realtime_mutex);
		return 0;
	}
	/* Unregister page fault handler */
	cam_smmu_reg_client_page_fault_handler(
		vfe_dev->buf_mgr->iommu_hdl,
		NULL, vfe_dev);

	/* Stop AXI traffic before touching CAMIF or resetting the core. */
	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
	if (rc <= 0)
		pr_err("%s: halt timeout rc=%ld\n", __func__, rc);

	vfe_dev->hw_info->vfe_ops.core_ops.
		update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
	vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 0);

	/* after regular hw stop, reduce open cnt */
	vfe_dev->vfe_open_cnt--;

	/* put scratch buf in all the wm */
	for (wm = 0; wm < vfe_dev->axi_data.hw_info->num_wm; wm++) {
		msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PING_FLAG);
		msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PONG_FLAG);
	}
	vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
	vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
	if (vfe_dev->vt_enable) {
		msm_isp_end_avtimer();
		vfe_dev->vt_enable = 0;
	}
	vfe_dev->is_split = 0;

	mutex_unlock(&vfe_dev->core_mutex);
	mutex_unlock(&vfe_dev->realtime_mutex);
	return 0;
}
2393
2394 void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
2395 {
2396         unsigned long flags;
2397         struct msm_vfe_tasklet_queue_cmd *queue_cmd;
2398
2399         spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
2400         while (atomic_read(&vfe_dev->irq_cnt)) {
2401                 queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
2402                 struct msm_vfe_tasklet_queue_cmd, list);
2403
2404                 if (!queue_cmd) {
2405                         atomic_set(&vfe_dev->irq_cnt, 0);
2406                         break;
2407                 }
2408                 atomic_sub(1, &vfe_dev->irq_cnt);
2409                 list_del(&queue_cmd->list);
2410                 queue_cmd->cmd_used = 0;
2411         }
2412         spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
2413 }
2414
2415 void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
2416                                 enum msm_vfe_input_src frame_src)
2417 {
2418         struct msm_vfe_axi_stream *stream_info = NULL;
2419         uint32_t j = 0;
2420         unsigned long flags;
2421
2422         for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
2423                 stream_info = &vfe_dev->axi_data.stream_info[j];
2424                 if (stream_info->state != ACTIVE)
2425                         continue;
2426                 if (frame_src != SRC_TO_INTF(stream_info->stream_src))
2427                         continue;
2428
2429                 stream_info =
2430                         &vfe_dev->axi_data.stream_info[j];
2431                 spin_lock_irqsave(&stream_info->lock, flags);
2432                 stream_info->activated_framedrop_period  =
2433                         stream_info->requested_framedrop_period;
2434                 spin_unlock_irqrestore(&stream_info->lock, flags);
2435         }
2436 }
2437
2438 static void msm_isp_dump_irq_debug(void)
2439 {
2440         uint32_t index, count, i;
2441
2442         if (dump_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
2443                 index = dump_data.first;
2444                 count = MAX_ISP_PING_PONG_DUMP_SIZE;
2445         } else {
2446                 index = 0;
2447                 count = dump_data.first;
2448         }
2449         for (i = 0; i < count; i++) {
2450                 pr_err("%s: trace_msm_cam_ping_pong_debug_dump need re-impl\n",
2451                         __func__);
2452                 index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
2453         }
2454 }
2455
2456 void msm_isp_dump_taskelet_debug(void)
2457 {
2458         uint32_t index, count, i;
2459
2460         if (tasklet_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
2461                 index = tasklet_data.first;
2462                 count = MAX_ISP_PING_PONG_DUMP_SIZE;
2463         } else {
2464                 index = 0;
2465                 count = tasklet_data.first;
2466         }
2467         for (i = 0; i < count; i++) {
2468                 pr_err("%s: trace_msm_cam_tasklet_debug_dump need implement\n",
2469                         __func__);
2470                 index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
2471         }
2472 }
2473
/*
 * msm_isp_dump_ping_pong_mismatch - diagnostic dump on ping/pong mismatch.
 *
 * Clears the IRQ mask of every VFE in the dual-VFE resource set and
 * synchronizes each VFE's interrupt line so no new events race the dump,
 * then replays the saved IRQ and tasklet ring buffers into the trace log.
 * Runs under dump_tasklet_lock to serialize against the recording paths.
 *
 * NOTE(review): the vfe_ops used for clear_irq_mask come from
 * dump_data.vfe_dev but are applied to every device in dual_vfe_res —
 * this assumes both VFEs share the same hw_info; confirm against probe.
 */
void msm_isp_dump_ping_pong_mismatch(void)
{
	int i;

	spin_lock(&dump_tasklet_lock);
	for (i = 0; i < MAX_VFE; i++) {
		dump_data.vfe_dev->hw_info->vfe_ops.axi_ops.
			clear_irq_mask(
		dump_data.vfe_dev->common_data->dual_vfe_res->vfe_dev[i]);
		/* Wait out any handler already running for this VFE. */
		synchronize_irq(
		(uint32_t)dump_data.vfe_dev->common_data->dual_vfe_res->vfe_dev[
			i]->vfe_irq->start);
	}
	trace_msm_cam_string(" ***** msm_isp_dump_irq_debug ****");
	msm_isp_dump_irq_debug();
	trace_msm_cam_string(" ***** msm_isp_dump_taskelet_debug ****");
	msm_isp_dump_taskelet_debug();
	spin_unlock(&dump_tasklet_lock);
}