OSDN Git Service

msm: ais: sensor: Fix the state check for i2c ops
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / media / platform / msm / ais / isp / msm_isp_util.c
1 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <media/v4l2-subdev.h>
16 #include <linux/ratelimit.h>
17
18 #include "msm.h"
19 #include "msm_isp_util.h"
20 #include "msm_isp_axi_util.h"
21 #include "msm_isp_stats_util.h"
22 #include "msm_camera_io_util.h"
23 #include "cam_smmu_api.h"
24 #define CREATE_TRACE_POINTS
25 #include "trace/events/msm_cam.h"
26
27 #define MAX_ISP_V4l2_EVENTS 100
28 #define MAX_ISP_REG_LIST 100
29 #define MAX_ISP_CMD_NUM 10
30 #define MAX_ISP_CMD_LEN 4096
31 static DEFINE_MUTEX(bandwidth_mgr_mutex);
32 static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
33
34 static uint64_t msm_isp_cpp_clk_rate;
35 static struct dump_ping_pong_state dump_data;
36 static struct dump_ping_pong_state tasklet_data;
37 static DEFINE_SPINLOCK(dump_irq_lock);
38 static DEFINE_SPINLOCK(dump_tasklet_lock);
39
40 #define VFE40_8974V2_VERSION 0x1001001A
41
42 void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
43 {
44         int i;
45         char text[5];
46
47         text[4] = '\0';
48         for (i = 0; i < 4; i++) {
49                 text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
50                 if ((text[i] < '0') || (text[i] > 'z')) {
51                         pr_err("%s: Invalid output format %d (unprintable)\n",
52                                 origin, fourcc_format);
53                         return;
54                 }
55         }
56         pr_err("%s: Invalid output format %s\n",
57                 origin, text);
58 }
59
60 int msm_isp_init_bandwidth_mgr(struct vfe_device *vfe_dev,
61                         enum msm_isp_hw_client client)
62 {
63         int rc = 0;
64
65         mutex_lock(&bandwidth_mgr_mutex);
66         if (isp_bandwidth_mgr.client_info[client].active) {
67                 mutex_unlock(&bandwidth_mgr_mutex);
68                 return rc;
69         }
70         isp_bandwidth_mgr.client_info[client].active = 1;
71         isp_bandwidth_mgr.use_count++;
72         if (vfe_dev && !isp_bandwidth_mgr.bus_client) {
73                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.init_bw_mgr(vfe_dev,
74                                 &isp_bandwidth_mgr);
75                 if (!rc) {
76                         isp_bandwidth_mgr.update_bw =
77                         vfe_dev->hw_info->vfe_ops.platform_ops.update_bw;
78                         isp_bandwidth_mgr.deinit_bw_mgr =
79                         vfe_dev->hw_info->vfe_ops.platform_ops.deinit_bw_mgr;
80                 }
81         }
82         if (rc) {
83                 isp_bandwidth_mgr.use_count--;
84                 isp_bandwidth_mgr.client_info[client].active = 0;
85         }
86
87         mutex_unlock(&bandwidth_mgr_mutex);
88         return rc;
89 }
90
91 int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
92         uint64_t ab, uint64_t ib)
93 {
94         int rc;
95
96         mutex_lock(&bandwidth_mgr_mutex);
97         if (!isp_bandwidth_mgr.use_count ||
98                 !isp_bandwidth_mgr.bus_client) {
99                 pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
100                         __func__, isp_bandwidth_mgr.use_count,
101                         isp_bandwidth_mgr.bus_client);
102                 mutex_unlock(&bandwidth_mgr_mutex);
103                 return -EINVAL;
104         }
105
106         isp_bandwidth_mgr.client_info[client].ab = ab;
107         isp_bandwidth_mgr.client_info[client].ib = ib;
108         rc = isp_bandwidth_mgr.update_bw(&isp_bandwidth_mgr);
109         mutex_unlock(&bandwidth_mgr_mutex);
110         return 0;
111 }
112
113 void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
114 {
115         if (client >= MAX_ISP_CLIENT) {
116                 pr_err("invalid Client id %d", client);
117                 return;
118         }
119         mutex_lock(&bandwidth_mgr_mutex);
120         memset(&isp_bandwidth_mgr.client_info[client], 0,
121                         sizeof(struct msm_isp_bandwidth_info));
122         if (isp_bandwidth_mgr.use_count) {
123                 isp_bandwidth_mgr.use_count--;
124                 if (isp_bandwidth_mgr.use_count) {
125                         mutex_unlock(&bandwidth_mgr_mutex);
126                         return;
127                 }
128
129                 if (!isp_bandwidth_mgr.bus_client) {
130                         pr_err("%s:%d error: bus client invalid\n",
131                                 __func__, __LINE__);
132                         mutex_unlock(&bandwidth_mgr_mutex);
133                         return;
134                 }
135
136                 isp_bandwidth_mgr.deinit_bw_mgr(
137                                 &isp_bandwidth_mgr);
138         }
139         mutex_unlock(&bandwidth_mgr_mutex);
140 }
141
142 void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
143                                 struct msm_isp_statistics *stats)
144 {
145         stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
146         stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
147         stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
148
149         stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
150         stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
151         stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
152
153         stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
154         stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
155         stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
156         stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
157         stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
158         stats->vfe_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
159         stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
160 }
161
162 void msm_isp_util_update_clk_rate(long clock_rate)
163 {
164         msm_isp_cpp_clk_rate = clock_rate;
165 }
166
167 uint32_t msm_isp_get_framedrop_period(
168         enum msm_vfe_frame_skip_pattern frame_skip_pattern)
169 {
170         switch (frame_skip_pattern) {
171         case NO_SKIP:
172         case EVERY_2FRAME:
173         case EVERY_3FRAME:
174         case EVERY_4FRAME:
175         case EVERY_5FRAME:
176         case EVERY_6FRAME:
177         case EVERY_7FRAME:
178         case EVERY_8FRAME:
179                 return frame_skip_pattern + 1;
180         case EVERY_16FRAME:
181                 return 16;
182         case EVERY_32FRAME:
183                 return 32;
184         case SKIP_ALL:
185                 return 1;
186         default:
187                 return 1;
188         }
189         return 1;
190 }
191
192 void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
193         struct vfe_device *vfe_dev)
194 {
195         struct timespec ts;
196
197         do_gettimeofday(&(time_stamp->event_time));
198         if (vfe_dev->vt_enable) {
199                 msm_isp_get_avtimer_ts(time_stamp);
200                 time_stamp->buf_time.tv_sec    = time_stamp->vt_time.tv_sec;
201                 time_stamp->buf_time.tv_usec   = time_stamp->vt_time.tv_usec;
202         } else  {
203                 get_monotonic_boottime(&ts);
204                 time_stamp->buf_time.tv_sec    = ts.tv_sec;
205                 time_stamp->buf_time.tv_usec   = ts.tv_nsec/1000;
206         }
207
208 }
209
210 static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
211 {
212         u32 evt_id = ISP_EVENT_SUBS_MASK_NONE;
213
214         switch (evt_mask) {
215         case ISP_EVENT_MASK_INDEX_STATS_NOTIFY:
216                 evt_id = ISP_EVENT_STATS_NOTIFY;
217                 break;
218         case ISP_EVENT_MASK_INDEX_ERROR:
219                 evt_id = ISP_EVENT_ERROR;
220                 break;
221         case ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT:
222                 evt_id = ISP_EVENT_IOMMU_P_FAULT;
223                 break;
224         case ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE:
225                 evt_id = ISP_EVENT_STREAM_UPDATE_DONE;
226                 break;
227         case ISP_EVENT_MASK_INDEX_REG_UPDATE:
228                 evt_id = ISP_EVENT_REG_UPDATE;
229                 break;
230         case ISP_EVENT_MASK_INDEX_SOF:
231                 evt_id = ISP_EVENT_SOF;
232                 break;
233         case ISP_EVENT_MASK_INDEX_BUF_DIVERT:
234                 evt_id = ISP_EVENT_BUF_DIVERT;
235                 break;
236         case ISP_EVENT_MASK_INDEX_BUF_DONE:
237                 evt_id = ISP_EVENT_BUF_DONE;
238                 break;
239         case ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY:
240                 evt_id = ISP_EVENT_COMP_STATS_NOTIFY;
241                 break;
242         case ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE:
243                 evt_id = ISP_EVENT_FE_READ_DONE;
244                 break;
245         case ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH:
246                 evt_id = ISP_EVENT_PING_PONG_MISMATCH;
247                 break;
248         case ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING:
249                 evt_id = ISP_EVENT_REG_UPDATE_MISSING;
250                 break;
251         case ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR:
252                 evt_id = ISP_EVENT_BUF_FATAL_ERROR;
253                 break;
254         default:
255                 evt_id = ISP_EVENT_SUBS_MASK_NONE;
256                 break;
257         }
258
259         return evt_id;
260 }
261
262 static inline int msm_isp_subscribe_event_mask(struct v4l2_fh *fh,
263                 struct v4l2_event_subscription *sub, int evt_mask_index,
264                 u32 evt_id, bool subscribe_flag)
265 {
266         int rc = 0, i, interface;
267
268         if (evt_mask_index == ISP_EVENT_MASK_INDEX_STATS_NOTIFY) {
269                 for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
270                         sub->type = evt_id + i;
271                         if (subscribe_flag)
272                                 rc = v4l2_event_subscribe(fh, sub,
273                                         MAX_ISP_V4l2_EVENTS, NULL);
274                         else
275                                 rc = v4l2_event_unsubscribe(fh, sub);
276                         if (rc != 0) {
277                                 pr_err("%s: Subs event_type =0x%x failed\n",
278                                         __func__, sub->type);
279                                 return rc;
280                         }
281                 }
282         } else if (evt_mask_index == ISP_EVENT_MASK_INDEX_SOF ||
283                    evt_mask_index == ISP_EVENT_MASK_INDEX_REG_UPDATE ||
284                    evt_mask_index == ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE) {
285                 for (interface = 0; interface < VFE_SRC_MAX; interface++) {
286                         sub->type = evt_id | interface;
287                         if (subscribe_flag)
288                                 rc = v4l2_event_subscribe(fh, sub,
289                                         MAX_ISP_V4l2_EVENTS, NULL);
290                         else
291                                 rc = v4l2_event_unsubscribe(fh, sub);
292                         if (rc != 0) {
293                                 pr_err("%s: Subs event_type =0x%x failed\n",
294                                         __func__, sub->type);
295                                 return rc;
296                         }
297                 }
298         } else {
299                 sub->type = evt_id;
300                 if (subscribe_flag)
301                         rc = v4l2_event_subscribe(fh, sub,
302                                 MAX_ISP_V4l2_EVENTS, NULL);
303                 else
304                         rc = v4l2_event_unsubscribe(fh, sub);
305                 if (rc != 0) {
306                         pr_err("%s: Subs event_type =0x%x failed\n",
307                                 __func__, sub->type);
308                         return rc;
309                 }
310         }
311         return rc;
312 }
313
314 static inline int msm_isp_process_event_subscription(struct v4l2_fh *fh,
315         struct v4l2_event_subscription *sub, bool subscribe_flag)
316 {
317         int rc = 0, evt_mask_index = 0;
318         u32 evt_mask = sub->type;
319         u32 evt_id = 0;
320
321         if (evt_mask == ISP_EVENT_SUBS_MASK_NONE) {
322                 pr_err("%s: Subs event_type is None=0x%x\n",
323                         __func__, evt_mask);
324                 return 0;
325         }
326
327         for (evt_mask_index = ISP_EVENT_MASK_INDEX_STATS_NOTIFY;
328                 evt_mask_index <= ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR;
329                 evt_mask_index++) {
330                 if (evt_mask & (1<<evt_mask_index)) {
331                         evt_id = msm_isp_evt_mask_to_isp_event(evt_mask_index);
332                         rc = msm_isp_subscribe_event_mask(fh, sub,
333                                 evt_mask_index, evt_id, subscribe_flag);
334                         if (rc != 0) {
335                                 pr_err("%s: Subs event index:%d failed\n",
336                                         __func__, evt_mask_index);
337                                 return rc;
338                         }
339                 }
340         }
341         return rc;
342 }
343
344 int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
345         struct v4l2_event_subscription *sub)
346 {
347         return msm_isp_process_event_subscription(fh, sub, true);
348 }
349
350 int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
351         struct v4l2_event_subscription *sub)
352 {
353         return msm_isp_process_event_subscription(fh, sub, false);
354 }
355
356 static int msm_isp_start_fetch_engine(struct vfe_device *vfe_dev,
357         void *arg)
358 {
359         struct msm_vfe_fetch_eng_start *fe_cfg = arg;
360         /*
361          * For Offline VFE, HAL expects same frame id
362          * for offline output which it requested in do_reprocess.
363          */
364         vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
365                 fe_cfg->frame_id;
366         return vfe_dev->hw_info->vfe_ops.core_ops.
367                 start_fetch_eng(vfe_dev, arg);
368 }
369
370 static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
371         void *arg)
372 {
373         struct msm_vfe_fetch_eng_multi_pass_start *fe_cfg = arg;
374         struct msm_vfe_axi_stream *stream_info = NULL;
375         int i = 0, rc;
376         uint32_t wm_reload_mask = 0;
377         /*
378          * For Offline VFE, HAL expects same frame id
379          * for offline output which it requested in do_reprocess.
380          */
381         vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
382                 fe_cfg->frame_id;
383
384         if (fe_cfg->offline_pass == OFFLINE_SECOND_PASS) {
385                 stream_info = &vfe_dev->axi_data.stream_info[
386                         HANDLE_TO_IDX(fe_cfg->output_stream_id)];
387                 if (!stream_info) {
388                         pr_err("%s: Couldn't find streamid 0x%X\n", __func__,
389                                 fe_cfg->output_stream_id);
390                         return -EINVAL;
391                 }
392                 vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
393                         0, 1);
394                 msm_isp_reset_framedrop(vfe_dev, stream_info);
395                 mutex_lock(&vfe_dev->buf_mgr->lock);
396                 rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
397                         VFE_PING_FLAG, fe_cfg->output_buf_idx);
398                 mutex_unlock(&vfe_dev->buf_mgr->lock);
399                 if (rc < 0) {
400                         pr_err("%s: Fetch engine config failed\n", __func__);
401                         return -EINVAL;
402                 }
403                 for (i = 0; i < stream_info->num_planes; i++) {
404                         vfe_dev->hw_info->vfe_ops.axi_ops.
405                         enable_wm(vfe_dev->vfe_base, stream_info->wm[i],
406                                         1);
407                         wm_reload_mask |= (1 << stream_info->wm[i]);
408                 }
409                 vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
410                         VFE_SRC_MAX);
411                 vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
412                         vfe_dev->vfe_base, wm_reload_mask);
413         }
414         return vfe_dev->hw_info->vfe_ops.core_ops.
415                 start_fetch_eng_multi_pass(vfe_dev, arg);
416 }
417
418 void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
419         struct msm_vfe_fetch_engine_info *fetch_engine_info)
420 {
421         struct msm_isp_event_data fe_rd_done_event;
422
423         memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data));
424         fe_rd_done_event.frame_id =
425                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
426         fe_rd_done_event.u.fetch_done.session_id =
427                 fetch_engine_info->session_id;
428         fe_rd_done_event.u.fetch_done.stream_id = fetch_engine_info->stream_id;
429         fe_rd_done_event.u.fetch_done.handle = fetch_engine_info->bufq_handle;
430         fe_rd_done_event.u.fetch_done.buf_idx = fetch_engine_info->buf_idx;
431         fe_rd_done_event.u.fetch_done.fd = fetch_engine_info->fd;
432         fe_rd_done_event.u.fetch_done.offline_mode =
433                 fetch_engine_info->offline_mode;
434
435         ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
436                 __func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
437         fetch_engine_info->is_busy = 0;
438         msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
439 }
440
441 static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
442         struct msm_vfe_input_cfg *input_cfg)
443 {
444         int rc = 0;
445         struct msm_vfe_pix_cfg *pix_cfg = NULL;
446
447         pr_debug("%s: entry\n", __func__);
448
449         if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
450                 pr_err("%s: pixel path is active\n", __func__);
451                 return -EINVAL;
452         }
453
454         pix_cfg = &input_cfg->d.pix_cfg;
455
456         vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
457                 input_cfg->input_pix_clk;
458         vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
459                 input_cfg->d.pix_cfg.input_mux;
460         vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
461                 input_cfg->d.pix_cfg.input_format;
462         vfe_dev->axi_data.src_info[VFE_PIX_0].sof_counter_step = 1;
463
464         /*
465          * Fill pixel_clock into input_pix_clk so that user space
466          * can use rounded clk rate
467          */
468         input_cfg->input_pix_clk =
469                 vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock;
470
471         ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
472                 input_cfg->d.pix_cfg.input_mux, CAMIF,
473                 input_cfg->d.pix_cfg.input_format);
474
475         if (input_cfg->d.pix_cfg.input_mux == CAMIF ||
476                 input_cfg->d.pix_cfg.input_mux == TESTGEN) {
477                 vfe_dev->axi_data.src_info[VFE_PIX_0].width =
478                         input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
479                 if (input_cfg->d.pix_cfg.camif_cfg.subsample_cfg.
480                         sof_counter_step > 0) {
481                         vfe_dev->axi_data.src_info[VFE_PIX_0].
482                                 sof_counter_step = input_cfg->d.pix_cfg.
483                                 camif_cfg.subsample_cfg.sof_counter_step;
484                 }
485         } else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
486                 vfe_dev->axi_data.src_info[VFE_PIX_0].width =
487                         input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
488         }
489         vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
490                         vfe_dev, &input_cfg->d.pix_cfg);
491         vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_PIX_0);
492
493         pr_debug("%s: exit\n", __func__);
494
495         return rc;
496 }
497
498 static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
499         struct msm_vfe_input_cfg *input_cfg)
500 {
501         int rc = 0;
502
503         if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
504                 pr_err("%s: RAW%d path is active\n", __func__,
505                            input_cfg->input_src - VFE_RAW_0);
506                 return -EINVAL;
507         }
508
509         vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
510                 input_cfg->input_pix_clk;
511         vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
512                 vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
513         return rc;
514 }
515
516 static int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
517 {
518         int rc = 0;
519         struct msm_vfe_input_cfg *input_cfg = arg;
520         long pixel_clock = 0;
521
522         switch (input_cfg->input_src) {
523         case VFE_PIX_0:
524                 rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
525                 break;
526         case VFE_RAW_0:
527         case VFE_RAW_1:
528         case VFE_RAW_2:
529                 rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
530                 break;
531         default:
532                 pr_err("%s: Invalid input source\n", __func__);
533                 rc = -EINVAL;
534         }
535
536         pixel_clock = input_cfg->input_pix_clk;
537         rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
538                 &pixel_clock);
539         if (rc < 0) {
540                 pr_err("%s: clock set rate failed\n", __func__);
541                 return rc;
542         }
543         return rc;
544 }
545
546 static int msm_isp_camif_cfg(struct vfe_device *vfe_dev, void *arg)
547 {
548         int rc = 0;
549         struct msm_vfe_camif_cfg *camif_cfg = arg;
550         struct msm_vfe_input_cfg input_cfg;
551         long pixel_clock = 0;
552
553         pr_debug("%s: entry\n", __func__);
554
555         memset(&input_cfg, 0, sizeof(input_cfg));
556
557         input_cfg.input_src = VFE_PIX_0;
558         input_cfg.input_pix_clk = 320000000;
559         input_cfg.d.pix_cfg.camif_cfg = *camif_cfg;
560
561         /* populate values from operation cfg */
562         input_cfg.d.pix_cfg.input_mux =
563                 vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux;
564         input_cfg.d.pix_cfg.camif_cfg.camif_input =
565                 vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input;
566
567         rc = msm_isp_cfg_pix(vfe_dev, &input_cfg);
568
569         pixel_clock = input_cfg.input_pix_clk;
570         rc = vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev,
571                 &pixel_clock);
572         if (rc < 0) {
573                 pr_err("%s: clock set rate failed\n", __func__);
574                 return rc;
575         }
576
577         pr_debug("%s: exit\n", __func__);
578
579         return rc;
580 }
581
582
583 static int msm_isp_operation_cfg(struct vfe_device *vfe_dev, void *arg)
584 {
585         struct msm_vfe_operation_cfg *op_cfg = arg;
586
587         pr_debug("%s: entry\n", __func__);
588
589         vfe_dev->hvx_cmd = op_cfg->hvx_cmd;
590         vfe_dev->is_split = 0; /* default to false */
591
592         /* yuv_cosited currently not used */
593         /* pixel input select not used */
594
595         vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
596                 op_cfg->input_mux;
597         vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_pattern =
598                 op_cfg->pixel_pattern;
599         vfe_dev->axi_data.src_info[VFE_PIX_0].camif_input = op_cfg->camif_input;
600
601         pr_debug("%s: exit\n", __func__);
602
603         return 0;
604 }
605
606
/*
 * msm_isp_set_dual_HW_master_slave_mode - Mark this VFE's input sources
 * as Master or Slave for dual-ISP operation.
 * @vfe_dev: the VFE device being configured.
 * @arg: struct msm_isp_set_dual_hw_ms_cmd describing the role, primary
 *       interface and the additional sources to tag.
 *
 * Records the master/slave type on the primary interface; a master
 * publishes its SOF info in the shared ms_resource, a slave reserves
 * the first free slave slot (and its slave_sof_info entry) under the
 * common device lock. Every listed source is then tagged with
 * DUAL_HW_MASTER_SLAVE so frame ids stay in sync.
 *
 * Returns 0 on success, -EINVAL on bad parameters, -EBUSY when no free
 * slave slot is available.
 */
static int msm_isp_set_dual_HW_master_slave_mode(
	struct vfe_device *vfe_dev, void *arg)
{
	/*
	 * This method assumes no 2 processes are accessing it simultaneously.
	 * Currently this is guaranteed by mutex lock in ioctl.
	 * If that changes, need to revisit this
	 */
	int rc = 0, i, j;
	struct msm_isp_set_dual_hw_ms_cmd *dual_hw_ms_cmd = NULL;
	struct msm_vfe_src_info *src_info = NULL;
	unsigned long flags;

	if (!vfe_dev || !arg) {
		pr_err("%s: Error! Invalid input vfe_dev %pK arg %pK\n",
			__func__, vfe_dev, arg);
		return -EINVAL;
	}

	dual_hw_ms_cmd = (struct msm_isp_set_dual_hw_ms_cmd *)arg;
	vfe_dev->common_data->ms_resource.dual_hw_type = DUAL_HW_MASTER_SLAVE;
	vfe_dev->vfe_ub_policy = MSM_WM_UB_EQUAL_SLICING;
	/* Record the ms type on the primary interface, if one was given */
	if (dual_hw_ms_cmd->primary_intf < VFE_SRC_MAX) {
		ISP_DBG("%s: vfe %d primary_intf %d\n", __func__,
			vfe_dev->pdev->id, dual_hw_ms_cmd->primary_intf);
		src_info = &vfe_dev->axi_data.
			src_info[dual_hw_ms_cmd->primary_intf];
		src_info->dual_hw_ms_info.dual_hw_ms_type =
			dual_hw_ms_cmd->dual_hw_ms_type;
	}

	/* No lock needed here since ioctl lock protects 2 session from race */
	if (src_info != NULL &&
		dual_hw_ms_cmd->dual_hw_ms_type == MS_TYPE_MASTER) {
		src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
		ISP_DBG("%s: vfe %d Master\n", __func__, vfe_dev->pdev->id);

		/* Master exposes its SOF info via the shared resource */
		src_info->dual_hw_ms_info.sof_info =
			&vfe_dev->common_data->ms_resource.master_sof_info;
		vfe_dev->common_data->ms_resource.sof_delta_threshold =
			dual_hw_ms_cmd->sof_delta_threshold;
	} else if (src_info != NULL) {
		/* Slave: reserve the first free slot under the device lock */
		spin_lock_irqsave(
			&vfe_dev->common_data->common_dev_data_lock,
			flags);
		src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
		ISP_DBG("%s: vfe %d Slave\n", __func__, vfe_dev->pdev->id);

		for (j = 0; j < MS_NUM_SLAVE_MAX; j++) {
			if (vfe_dev->common_data->ms_resource.
				reserved_slave_mask & (1 << j))
				continue;

			vfe_dev->common_data->ms_resource.reserved_slave_mask |=
				(1 << j);
			vfe_dev->common_data->ms_resource.num_slave++;
			src_info->dual_hw_ms_info.sof_info =
				&vfe_dev->common_data->ms_resource.
				slave_sof_info[j];
			src_info->dual_hw_ms_info.slave_id = j;
			ISP_DBG("%s: Slave id %d\n", __func__, j);
			break;
		}
		spin_unlock_irqrestore(
			&vfe_dev->common_data->common_dev_data_lock,
			flags);

		/* j == MS_NUM_SLAVE_MAX means the loop found no free slot */
		if (j == MS_NUM_SLAVE_MAX) {
			pr_err("%s: Error! Cannot find free aux resource\n",
				__func__);
			return -EBUSY;
		}
	}
	ISP_DBG("%s: vfe %d num_src %d\n", __func__, vfe_dev->pdev->id,
		dual_hw_ms_cmd->num_src);
	if (dual_hw_ms_cmd->num_src > VFE_SRC_MAX) {
		pr_err("%s: Error! Invalid num_src %d\n", __func__,
			dual_hw_ms_cmd->num_src);
		return -EINVAL;
	}
	/* This for loop is for non-primary intf to be marked with Master/Slave
	 * in order for frame id sync. But their timestamp is not saved.
	 * So no sof_info resource is allocated
	 */
	for (i = 0; i < dual_hw_ms_cmd->num_src; i++) {
		if (dual_hw_ms_cmd->input_src[i] >= VFE_SRC_MAX) {
			pr_err("%s: Error! Invalid SRC param %d\n", __func__,
				dual_hw_ms_cmd->input_src[i]);
			return -EINVAL;
		}
		ISP_DBG("%s: vfe %d src %d type %d\n", __func__,
			vfe_dev->pdev->id, dual_hw_ms_cmd->input_src[i],
			dual_hw_ms_cmd->dual_hw_ms_type);
		src_info = &vfe_dev->axi_data.
			src_info[dual_hw_ms_cmd->input_src[i]];
		src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
		src_info->dual_hw_ms_info.dual_hw_ms_type =
			dual_hw_ms_cmd->dual_hw_ms_type;
	}

	return rc;
}
709
710 static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
711 {
712         int rc = 0;
713         uint32_t count = 0;
714         struct msm_vfe_cfg_cmd_list *proc_cmd =
715                 (struct msm_vfe_cfg_cmd_list *)arg;
716         struct msm_vfe_cfg_cmd_list cmd, cmd_next;
717
718         if (!vfe_dev || !arg) {
719                 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
720                         vfe_dev, arg);
721                 return -EINVAL;
722         }
723
724         rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
725         if (rc < 0)
726                 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
727
728         cmd = *proc_cmd;
729
730         while (cmd.next) {
731                 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
732                         pr_err("%s:%d failed: next size %u != expected %zu\n",
733                                 __func__, __LINE__, cmd.next_size,
734                                 sizeof(struct msm_vfe_cfg_cmd_list));
735                         break;
736                 }
737                 if (++count >= MAX_ISP_REG_LIST) {
738                         pr_err("%s:%d Error exceeding the max register count:%u\n",
739                                 __func__, __LINE__, count);
740                         rc = -EFAULT;
741                         break;
742                 }
743                 if (copy_from_user(&cmd_next, (void __user *)cmd.next,
744                         sizeof(struct msm_vfe_cfg_cmd_list))) {
745                         rc = -EFAULT;
746                         continue;
747                 }
748
749                 rc = msm_isp_proc_cmd(vfe_dev, &cmd_next.cfg_cmd);
750                 if (rc < 0)
751                         pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
752
753                 cmd = cmd_next;
754         }
755         return rc;
756 }
757
758 #ifdef CONFIG_COMPAT
/* 32-bit (compat) layout of struct msm_vfe_cfg_cmd2: user pointers are
 * carried as compat_caddr_t so a 32-bit userspace can issue this ioctl
 * against a 64-bit kernel.
 */
struct msm_vfe_cfg_cmd2_32 {
	uint16_t num_cfg;	/* number of reg_cfg commands at cfg_cmd */
	uint16_t cmd_len;	/* byte length of the cfg_data payload */
	compat_caddr_t cfg_data;	/* 32-bit user pointer to payload */
	compat_caddr_t cfg_cmd;	/* 32-bit user pointer to command array */
};

/* 32-bit node of the register-config linked list (compat counterpart of
 * struct msm_vfe_cfg_cmd_list); next is a 32-bit user-space address.
 */
struct msm_vfe_cfg_cmd_list_32 {
	struct msm_vfe_cfg_cmd2_32   cfg_cmd;
	compat_caddr_t               next;
	uint32_t                     next_size;
};

/* Compat ioctl numbers: same command codes as the native ones but sized
 * for the 32-bit structures above, so _IOC_SIZE matches what a 32-bit
 * caller actually passes.
 */
#define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
#define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
	_IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
776
777 static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
778         struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
779 {
780         proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
781         proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
782         proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
783         proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
784 }
785
786 static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
787 {
788         int rc = 0;
789         uint32_t count = 0;
790         struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
791                 (struct msm_vfe_cfg_cmd_list_32 *)arg;
792         struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
793         struct msm_vfe_cfg_cmd2 current_cmd;
794
795         if (!vfe_dev || !arg) {
796                 pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
797                         vfe_dev, arg);
798                 return -EINVAL;
799         }
800         msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
801         rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
802         if (rc < 0)
803                 pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
804
805         cmd = *proc_cmd;
806
807         while (compat_ptr(cmd.next) != NULL) {
808                 if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
809                         pr_err("%s:%d failed: next size %u != expected %zu\n",
810                                 __func__, __LINE__, cmd.next_size,
811                                 sizeof(struct msm_vfe_cfg_cmd_list));
812                         break;
813                 }
814                 if (++count >= MAX_ISP_REG_LIST) {
815                         pr_err("%s:%d Error exceeding the max register count:%u\n",
816                                 __func__, __LINE__, count);
817                         rc = -EFAULT;
818                         break;
819                 }
820                 if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
821                         sizeof(struct msm_vfe_cfg_cmd_list_32))) {
822                         rc = -EFAULT;
823                         continue;
824                 }
825
826                 msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
827                 rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
828                 if (rc < 0)
829                         pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
830
831                 cmd = cmd_next;
832         }
833         return rc;
834 }
835
/* Route the command list to the compat or native walker depending on the
 * bitness of the calling task.
 */
static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
{
	return is_compat_task() ?
		msm_isp_proc_cmd_list_compat(vfe_dev, arg) :
		msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
}
843 #else /* CONFIG_COMPAT */
/* No compat support built in: every caller uses the native list walker. */
static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
{
	return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
}
848 #endif /* CONFIG_COMPAT */
849
850 static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
851         unsigned int cmd, void *arg)
852 {
853         long rc = 0;
854         long rc2 = 0;
855         unsigned long flags;
856         struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
857
858         if (!vfe_dev || !vfe_dev->vfe_base) {
859                 pr_err("%s:%d failed: invalid params %pK\n",
860                         __func__, __LINE__, vfe_dev);
861                 if (vfe_dev)
862                         pr_err("%s:%d failed %pK\n", __func__,
863                                 __LINE__, vfe_dev->vfe_base);
864                 return -EINVAL;
865         }
866
867         /* use real time mutex for hard real-time ioctls such as
868          * buffer operations and register updates.
869          * Use core mutex for other ioctls that could take
870          * longer time to complete such as start/stop ISP streams
871          * which blocks until the hardware start/stop streaming
872          */
873         ISP_DBG("%s: cmd: %d\n", __func__, _IOC_TYPE(cmd));
874         switch (cmd) {
875         case VIDIOC_MSM_VFE_REG_CFG: {
876                 mutex_lock(&vfe_dev->realtime_mutex);
877                 rc = msm_isp_proc_cmd(vfe_dev, arg);
878                 mutex_unlock(&vfe_dev->realtime_mutex);
879                 break;
880         }
881         case VIDIOC_MSM_VFE_REG_LIST_CFG: {
882                 mutex_lock(&vfe_dev->realtime_mutex);
883                 rc = msm_isp_proc_cmd_list(vfe_dev, arg);
884                 mutex_unlock(&vfe_dev->realtime_mutex);
885                 break;
886         }
887         case VIDIOC_MSM_ISP_REQUEST_BUFQ:
888                 /* fallthrough */
889         case VIDIOC_MSM_ISP_ENQUEUE_BUF:
890                 /* fallthrough */
891         case VIDIOC_MSM_ISP_DEQUEUE_BUF:
892                 /* fallthrough */
893         case VIDIOC_MSM_ISP_UNMAP_BUF: {
894                 mutex_lock(&vfe_dev->buf_mgr->lock);
895                 rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
896                 mutex_unlock(&vfe_dev->buf_mgr->lock);
897                 break;
898         }
899         case VIDIOC_MSM_ISP_RELEASE_BUFQ: {
900                 if (vfe_dev->buf_mgr == NULL) {
901                         pr_err("%s: buf mgr NULL! rc = -1\n", __func__);
902                         rc = -EINVAL;
903                         return rc;
904                 }
905                 mutex_lock(&vfe_dev->buf_mgr->lock);
906                 rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
907                 mutex_unlock(&vfe_dev->buf_mgr->lock);
908                 break;
909         }
910         case VIDIOC_MSM_ISP_REQUEST_STREAM:
911                 mutex_lock(&vfe_dev->core_mutex);
912                 rc = msm_isp_request_axi_stream(vfe_dev, arg);
913                 mutex_unlock(&vfe_dev->core_mutex);
914                 break;
915         case VIDIOC_MSM_ISP_RELEASE_STREAM:
916                 mutex_lock(&vfe_dev->core_mutex);
917                 rc = msm_isp_release_axi_stream(vfe_dev, arg);
918                 mutex_unlock(&vfe_dev->core_mutex);
919                 break;
920         case VIDIOC_MSM_ISP_CFG_STREAM:
921                 mutex_lock(&vfe_dev->core_mutex);
922                 mutex_lock(&vfe_dev->buf_mgr->lock);
923                 rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
924                 mutex_unlock(&vfe_dev->buf_mgr->lock);
925                 mutex_unlock(&vfe_dev->core_mutex);
926                 break;
927         case VIDIOC_MSM_ISP_CFG_HW_STATE:
928                 mutex_lock(&vfe_dev->core_mutex);
929                 rc = msm_isp_update_stream_bandwidth(vfe_dev,
930                         *(enum msm_vfe_hw_state *)arg);
931                 mutex_unlock(&vfe_dev->core_mutex);
932                 break;
933         case VIDIOC_MSM_ISP_AXI_HALT:
934                 mutex_lock(&vfe_dev->core_mutex);
935                 rc = msm_isp_axi_halt(vfe_dev, arg);
936                 mutex_unlock(&vfe_dev->core_mutex);
937                 break;
938         case VIDIOC_MSM_ISP_AXI_RESET:
939                 mutex_lock(&vfe_dev->core_mutex);
940                 if (atomic_read(&vfe_dev->error_info.overflow_state)
941                         != HALT_ENFORCED) {
942                         rc = msm_isp_stats_reset(vfe_dev);
943                         rc2 = msm_isp_axi_reset(vfe_dev, arg);
944                         if (!rc && rc2)
945                                 rc = rc2;
946                 } else {
947                         pr_err_ratelimited("%s: no HW reset, halt enforced.\n",
948                                 __func__);
949                 }
950                 mutex_unlock(&vfe_dev->core_mutex);
951                 break;
952         case VIDIOC_MSM_ISP_AXI_RESTART:
953                 mutex_lock(&vfe_dev->core_mutex);
954                 mutex_lock(&vfe_dev->buf_mgr->lock);
955                 if (atomic_read(&vfe_dev->error_info.overflow_state)
956                         != HALT_ENFORCED) {
957                         rc = msm_isp_stats_restart(vfe_dev);
958                         rc2 = msm_isp_axi_restart(vfe_dev, arg);
959                         if (!rc && rc2)
960                                 rc = rc2;
961                 } else {
962                         pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
963                                 __func__);
964                 }
965                 mutex_unlock(&vfe_dev->buf_mgr->lock);
966                 mutex_unlock(&vfe_dev->core_mutex);
967                 break;
968         case VIDIOC_MSM_ISP_INPUT_CFG:
969                 mutex_lock(&vfe_dev->core_mutex);
970                 rc = msm_isp_cfg_input(vfe_dev, arg);
971                 mutex_unlock(&vfe_dev->core_mutex);
972                 break;
973         case VIDIOC_MSM_ISP_AHB_CLK_CFG:
974                 mutex_lock(&vfe_dev->core_mutex);
975                 if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
976                         rc = vfe_dev->hw_info->vfe_ops.core_ops.
977                                         ahb_clk_cfg(vfe_dev, arg);
978                 else
979                         rc = -EOPNOTSUPP;
980                 mutex_unlock(&vfe_dev->core_mutex);
981                 break;
982         case VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE:
983                 mutex_lock(&vfe_dev->core_mutex);
984                 rc = msm_isp_set_dual_HW_master_slave_mode(vfe_dev, arg);
985                 mutex_unlock(&vfe_dev->core_mutex);
986                 break;
987         case VIDIOC_MSM_ISP_FETCH_ENG_START:
988         case VIDIOC_MSM_ISP_MAP_BUF_START_FE:
989                 mutex_lock(&vfe_dev->core_mutex);
990                 rc = msm_isp_start_fetch_engine(vfe_dev, arg);
991                 mutex_unlock(&vfe_dev->core_mutex);
992                 break;
993
994         case VIDIOC_MSM_ISP_FETCH_ENG_MULTI_PASS_START:
995         case VIDIOC_MSM_ISP_MAP_BUF_START_MULTI_PASS_FE:
996                 mutex_lock(&vfe_dev->core_mutex);
997                 rc = msm_isp_start_fetch_engine_multi_pass(vfe_dev, arg);
998                 mutex_unlock(&vfe_dev->core_mutex);
999                 break;
1000         case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
1001                 if (arg) {
1002                         enum msm_vfe_input_src frame_src =
1003                                 *((enum msm_vfe_input_src *)arg);
1004                         vfe_dev->hw_info->vfe_ops.core_ops.
1005                                 reg_update(vfe_dev, frame_src);
1006                 }
1007                 break;
1008         case VIDIOC_MSM_ISP_SET_SRC_STATE:
1009                 mutex_lock(&vfe_dev->core_mutex);
1010                 rc = msm_isp_set_src_state(vfe_dev, arg);
1011                 mutex_unlock(&vfe_dev->core_mutex);
1012                 break;
1013         case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
1014                 mutex_lock(&vfe_dev->core_mutex);
1015                 rc = msm_isp_request_stats_stream(vfe_dev, arg);
1016                 mutex_unlock(&vfe_dev->core_mutex);
1017                 break;
1018         case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
1019                 mutex_lock(&vfe_dev->core_mutex);
1020                 rc = msm_isp_release_stats_stream(vfe_dev, arg);
1021                 mutex_unlock(&vfe_dev->core_mutex);
1022                 break;
1023         case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
1024                 mutex_lock(&vfe_dev->core_mutex);
1025                 mutex_lock(&vfe_dev->buf_mgr->lock);
1026                 rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
1027                 mutex_unlock(&vfe_dev->buf_mgr->lock);
1028                 mutex_unlock(&vfe_dev->core_mutex);
1029                 break;
1030         case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
1031                 mutex_lock(&vfe_dev->core_mutex);
1032                 rc = msm_isp_update_stats_stream(vfe_dev, arg);
1033                 mutex_unlock(&vfe_dev->core_mutex);
1034                 break;
1035         case VIDIOC_MSM_ISP_UPDATE_STREAM:
1036                 mutex_lock(&vfe_dev->core_mutex);
1037                 rc = msm_isp_update_axi_stream(vfe_dev, arg);
1038                 mutex_unlock(&vfe_dev->core_mutex);
1039                 break;
1040         case VIDIOC_MSM_ISP_SMMU_ATTACH:
1041                 mutex_lock(&vfe_dev->core_mutex);
1042                 rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
1043                 mutex_unlock(&vfe_dev->core_mutex);
1044                 break;
1045         case VIDIOC_MSM_ISP_OPERATION_CFG:
1046                 mutex_lock(&vfe_dev->core_mutex);
1047                 msm_isp_operation_cfg(vfe_dev, arg);
1048                 mutex_unlock(&vfe_dev->core_mutex);
1049                 break;
1050         case VIDIOC_MSM_ISP_AXI_OUTPUT_CFG:
1051                 mutex_lock(&vfe_dev->core_mutex);
1052                 rc = msm_isp_axi_output_cfg(vfe_dev, arg);
1053                 mutex_unlock(&vfe_dev->core_mutex);
1054                 break;
1055         case VIDIOC_MSM_ISP_CAMIF_CFG:
1056                 mutex_lock(&vfe_dev->core_mutex);
1057                 rc = msm_isp_camif_cfg(vfe_dev, arg);
1058                 mutex_unlock(&vfe_dev->core_mutex);
1059                 break;
1060         case MSM_SD_NOTIFY_FREEZE:
1061                 vfe_dev->isp_sof_debug = 0;
1062                 vfe_dev->isp_raw0_debug = 0;
1063                 vfe_dev->isp_raw1_debug = 0;
1064                 vfe_dev->isp_raw2_debug = 0;
1065                 break;
1066         case MSM_SD_UNNOTIFY_FREEZE:
1067                 break;
1068         case MSM_SD_SHUTDOWN:
1069                 while (vfe_dev->vfe_open_cnt != 0)
1070                         msm_isp_close_node(sd, NULL);
1071                 break;
1072         case VIDIOC_MSM_ISP_SET_CLK_STATUS:
1073                 spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
1074                 vfe_dev->clk_enabled = *((unsigned int *)arg);
1075                 spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
1076                 break;
1077
1078         default:
1079                 pr_err_ratelimited("%s: Invalid ISP command %d\n", __func__,
1080                                 cmd);
1081                 rc = -EINVAL;
1082         }
1083         return rc;
1084 }
1085
1086
1087 #ifdef CONFIG_COMPAT
1088 static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
1089         unsigned int cmd, void *arg)
1090 {
1091         struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
1092         long rc = 0;
1093
1094         if (!vfe_dev || !vfe_dev->vfe_base) {
1095                 pr_err("%s:%d failed: invalid params %pK\n",
1096                         __func__, __LINE__, vfe_dev);
1097                 if (vfe_dev)
1098                         pr_err("%s:%d failed %pK\n", __func__,
1099                                 __LINE__, vfe_dev->vfe_base);
1100                 return -EINVAL;
1101         }
1102
1103         switch (cmd) {
1104         case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
1105                 struct msm_vfe_cfg_cmd2 proc_cmd;
1106
1107                 mutex_lock(&vfe_dev->realtime_mutex);
1108                 msm_isp_compat_to_proc_cmd(&proc_cmd,
1109                         (struct msm_vfe_cfg_cmd2_32 *) arg);
1110                 rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
1111                 mutex_unlock(&vfe_dev->realtime_mutex);
1112                 break;
1113         }
1114         case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
1115                 mutex_lock(&vfe_dev->realtime_mutex);
1116                 rc = msm_isp_proc_cmd_list(vfe_dev, arg);
1117                 mutex_unlock(&vfe_dev->realtime_mutex);
1118                 break;
1119         }
1120         default:
1121                 return msm_isp_ioctl_unlocked(sd, cmd, arg);
1122         }
1123
1124         return rc;
1125 }
1126
/* Public ioctl entry point (compat build): route through the compat
 * handler, which itself falls back to the native handler for commands
 * that need no 32-bit translation.
 */
long msm_isp_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	return msm_isp_ioctl_compat(sd, cmd, arg);
}
1132 #else /* CONFIG_COMPAT */
/* Public ioctl entry point (no compat support): dispatch directly. */
long msm_isp_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	return msm_isp_ioctl_unlocked(sd, cmd, arg);
}
1138 #endif /* CONFIG_COMPAT */
1139
1140 static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
1141         struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
1142         uint32_t *cfg_data, uint32_t cmd_len)
1143 {
1144         if (!vfe_dev || !reg_cfg_cmd) {
1145                 pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
1146                         __LINE__, vfe_dev, reg_cfg_cmd);
1147                 return -EINVAL;
1148         }
1149         if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
1150                 (!cfg_data || !cmd_len)) {
1151                 pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
1152                         __func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
1153                         cmd_len);
1154                 return -EINVAL;
1155         }
1156
1157         /* Validate input parameters */
1158         switch (reg_cfg_cmd->cmd_type) {
1159         case VFE_WRITE:
1160         case VFE_READ:
1161         case VFE_WRITE_MB: {
1162                 if ((reg_cfg_cmd->u.rw_info.reg_offset >
1163                         (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1164                         ((reg_cfg_cmd->u.rw_info.reg_offset +
1165                         reg_cfg_cmd->u.rw_info.len) >
1166                         vfe_dev->vfe_base_size) ||
1167                         (reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
1168                         pr_err_ratelimited("%s:%d regoffset %d len %d res %d\n",
1169                                 __func__, __LINE__,
1170                                 reg_cfg_cmd->u.rw_info.reg_offset,
1171                                 reg_cfg_cmd->u.rw_info.len,
1172                                 (uint32_t)vfe_dev->vfe_base_size);
1173                         return -EINVAL;
1174                 }
1175
1176                 if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
1177                         (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
1178                         ((reg_cfg_cmd->u.rw_info.cmd_data_offset +
1179                         reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
1180                         pr_err_ratelimited("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
1181                                 __func__, __LINE__,
1182                                 reg_cfg_cmd->u.rw_info.cmd_data_offset,
1183                                 reg_cfg_cmd->u.rw_info.len, cmd_len);
1184                         return -EINVAL;
1185                 }
1186                 break;
1187         }
1188
1189         case VFE_WRITE_DMI_16BIT:
1190         case VFE_WRITE_DMI_32BIT:
1191         case VFE_WRITE_DMI_64BIT:
1192         case VFE_READ_DMI_16BIT:
1193         case VFE_READ_DMI_32BIT:
1194         case VFE_READ_DMI_64BIT: {
1195                 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
1196                         reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1197                         if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
1198                                 reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
1199                                 (reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
1200                                 reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
1201                                 (sizeof(uint32_t)))) {
1202                                 pr_err("%s:%d hi %d lo %d\n",
1203                                         __func__, __LINE__,
1204                                         reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1205                                         reg_cfg_cmd->u.dmi_info.hi_tbl_offset);
1206                                 return -EINVAL;
1207                         }
1208                         if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
1209                                 pr_err("%s:%d len %d\n",
1210                                         __func__, __LINE__,
1211                                         reg_cfg_cmd->u.dmi_info.len);
1212                                 return -EINVAL;
1213                         }
1214                         if (((UINT_MAX -
1215                                 reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
1216                                 (reg_cfg_cmd->u.dmi_info.len -
1217                                 sizeof(uint32_t))) ||
1218                                 ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
1219                                 reg_cfg_cmd->u.dmi_info.len -
1220                                 sizeof(uint32_t)) > cmd_len)) {
1221                                 pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
1222                                         __func__, __LINE__,
1223                                         reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
1224                                         reg_cfg_cmd->u.dmi_info.len, cmd_len);
1225                                 return -EINVAL;
1226                         }
1227                 }
1228                 if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
1229                         (UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
1230                         ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
1231                         reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
1232                         pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
1233                                 __func__, __LINE__,
1234                                 reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
1235                                 reg_cfg_cmd->u.dmi_info.len, cmd_len);
1236                         return -EINVAL;
1237                 }
1238                 break;
1239         }
1240
1241         default:
1242                 break;
1243         }
1244
1245         switch (reg_cfg_cmd->cmd_type) {
1246         case VFE_WRITE: {
1247                 msm_camera_io_memcpy(vfe_dev->vfe_base +
1248                         reg_cfg_cmd->u.rw_info.reg_offset,
1249                         (void __iomem *)
1250                         (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
1251                         reg_cfg_cmd->u.rw_info.len);
1252                 break;
1253         }
1254         case VFE_WRITE_MB: {
1255                 msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
1256                         reg_cfg_cmd->u.rw_info.reg_offset,
1257                         (void __iomem *)
1258                         (cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4),
1259                         reg_cfg_cmd->u.rw_info.len);
1260                 break;
1261         }
1262         case VFE_CFG_MASK: {
1263                 uint32_t temp;
1264                 bool grab_lock;
1265                 unsigned long flags;
1266
1267                 if ((UINT_MAX - sizeof(temp) <
1268                         reg_cfg_cmd->u.mask_info.reg_offset) ||
1269                         (vfe_dev->vfe_base_size <
1270                         reg_cfg_cmd->u.mask_info.reg_offset +
1271                         sizeof(temp)) ||
1272                         (reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
1273                         pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
1274                         return -EINVAL;
1275                 }
1276                 grab_lock = vfe_dev->hw_info->vfe_ops.core_ops.
1277                         is_module_cfg_lock_needed(reg_cfg_cmd->
1278                         u.mask_info.reg_offset);
1279                 if (grab_lock)
1280                         spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
1281                 temp = msm_camera_io_r(vfe_dev->vfe_base +
1282                         reg_cfg_cmd->u.mask_info.reg_offset);
1283
1284                 temp &= ~reg_cfg_cmd->u.mask_info.mask;
1285                 temp |= reg_cfg_cmd->u.mask_info.val;
1286                 msm_camera_io_w(temp, vfe_dev->vfe_base +
1287                         reg_cfg_cmd->u.mask_info.reg_offset);
1288                 if (grab_lock)
1289                         spin_unlock_irqrestore(&vfe_dev->shared_data_lock,
1290                                 flags);
1291                 break;
1292         }
1293         case VFE_WRITE_DMI_16BIT:
1294         case VFE_WRITE_DMI_32BIT:
1295         case VFE_WRITE_DMI_64BIT: {
1296                 int i;
1297                 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1298                 uint32_t hi_val, lo_val, lo_val1;
1299
1300                 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
1301                         hi_tbl_ptr = cfg_data +
1302                                 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1303                 }
1304                 lo_tbl_ptr = cfg_data +
1305                         reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
1306                 if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
1307                         reg_cfg_cmd->u.dmi_info.len =
1308                                 reg_cfg_cmd->u.dmi_info.len / 2;
1309                 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1310                         lo_val = *lo_tbl_ptr++;
1311                         if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
1312                                 lo_val1 = lo_val & 0x0000FFFF;
1313                                 lo_val = (lo_val & 0xFFFF0000)>>16;
1314                                 msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
1315                                         vfe_dev->hw_info->dmi_reg_offset + 0x4);
1316                         } else if (reg_cfg_cmd->cmd_type ==
1317                                            VFE_WRITE_DMI_64BIT) {
1318                                 lo_tbl_ptr++;
1319                                 hi_val = *hi_tbl_ptr;
1320                                 hi_tbl_ptr = hi_tbl_ptr + 2;
1321                                 msm_camera_io_w(hi_val, vfe_dev->vfe_base +
1322                                         vfe_dev->hw_info->dmi_reg_offset);
1323                         }
1324                         msm_camera_io_w(lo_val, vfe_dev->vfe_base +
1325                                 vfe_dev->hw_info->dmi_reg_offset + 0x4);
1326                 }
1327                 break;
1328         }
1329         case VFE_READ_DMI_16BIT:
1330         case VFE_READ_DMI_32BIT:
1331         case VFE_READ_DMI_64BIT: {
1332                 int i;
1333                 uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
1334                 uint32_t hi_val, lo_val, lo_val1;
1335
1336                 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1337                         hi_tbl_ptr = cfg_data +
1338                                 reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
1339                 }
1340
1341                 lo_tbl_ptr = cfg_data +
1342                         reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
1343
1344                 if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
1345                         reg_cfg_cmd->u.dmi_info.len =
1346                                 reg_cfg_cmd->u.dmi_info.len / 2;
1347
1348                 for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
1349                         lo_val = msm_camera_io_r(vfe_dev->vfe_base +
1350                                         vfe_dev->hw_info->dmi_reg_offset + 0x4);
1351
1352                         if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
1353                                 lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
1354                                         vfe_dev->hw_info->dmi_reg_offset + 0x4);
1355                                 lo_val |= lo_val1 << 16;
1356                         }
1357                         *lo_tbl_ptr++ = lo_val;
1358                         if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
1359                                 hi_val = msm_camera_io_r(vfe_dev->vfe_base +
1360                                         vfe_dev->hw_info->dmi_reg_offset);
1361                                 *hi_tbl_ptr = hi_val;
1362                                 hi_tbl_ptr += 2;
1363                                 lo_tbl_ptr++;
1364                         }
1365                 }
1366                 break;
1367         }
1368         case VFE_HW_UPDATE_LOCK: {
1369                 uint32_t update_id =
1370                         vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
1371                 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
1372                         || update_id == *cfg_data) {
1373                         pr_err("%s hw update lock failed acq %d, cur id %u, last id %u\n",
1374                                 __func__,
1375                                 *cfg_data,
1376                                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
1377                                 update_id);
1378                         return -EINVAL;
1379                 }
1380                 break;
1381         }
1382         case VFE_HW_UPDATE_UNLOCK: {
1383                 if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
1384                         != *cfg_data) {
1385                         pr_err("hw update across frame boundary,begin id %u, end id %d\n",
1386                                 *cfg_data,
1387                                 vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
1388                 }
1389                 vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
1390                         vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
1391                 break;
1392         }
1393         case VFE_READ: {
1394                 int i;
1395                 uint32_t *data_ptr = cfg_data +
1396                         reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
1397                 for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
1398                         if ((data_ptr < cfg_data) ||
1399                                 (UINT_MAX / sizeof(*data_ptr) <
1400                                  (data_ptr - cfg_data)) ||
1401                                 (sizeof(*data_ptr) * (data_ptr - cfg_data) >=
1402                                  cmd_len))
1403                                 return -EINVAL;
1404                         *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
1405                                 reg_cfg_cmd->u.rw_info.reg_offset);
1406                         reg_cfg_cmd->u.rw_info.reg_offset += 4;
1407                 }
1408                 break;
1409         }
1410         case GET_MAX_CLK_RATE: {
1411                 int rc = 0;
1412                 unsigned long rate;
1413
1414                 if (cmd_len != sizeof(__u32)) {
1415                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1416                                 __func__, __LINE__, cmd_len,
1417                                 sizeof(__u32));
1418                         return -EINVAL;
1419                 }
1420                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_max_clk_rate(
1421                                                         vfe_dev, &rate);
1422                 if (rc < 0) {
1423                         pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
1424                         return -EINVAL;
1425                 }
1426
1427                 *(__u32 *)cfg_data = (__u32)rate;
1428
1429                 break;
1430         }
1431         case GET_CLK_RATES: {
1432                 int rc = 0;
1433                 struct msm_isp_clk_rates rates;
1434                 struct msm_isp_clk_rates *user_data =
1435                         (struct msm_isp_clk_rates *)cfg_data;
1436                 if (cmd_len != sizeof(struct msm_isp_clk_rates)) {
1437                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1438                                 __func__, __LINE__, cmd_len,
1439                                 sizeof(struct msm_isp_clk_rates));
1440                         return -EINVAL;
1441                 }
1442                 rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(
1443                                                         vfe_dev, &rates);
1444                 if (rc < 0) {
1445                         pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
1446                         return -EINVAL;
1447                 }
1448                 user_data->svs_rate = rates.svs_rate;
1449                 user_data->nominal_rate = rates.nominal_rate;
1450                 user_data->high_rate = rates.high_rate;
1451                 break;
1452         }
1453         case GET_ISP_ID: {
1454                 uint32_t *isp_id = NULL;
1455
1456                 if (cmd_len < sizeof(uint32_t)) {
1457                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1458                                 __func__, __LINE__, cmd_len,
1459                                 sizeof(uint32_t));
1460                         return -EINVAL;
1461                 }
1462
1463                 isp_id = (uint32_t *)cfg_data;
1464                 *isp_id = vfe_dev->pdev->id;
1465                 break;
1466         }
1467         case SET_WM_UB_SIZE:
1468                 break;
1469         case SET_UB_POLICY: {
1470
1471                 if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
1472                         pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
1473                                 __func__, __LINE__, cmd_len,
1474                                 sizeof(vfe_dev->vfe_ub_policy));
1475                         return -EINVAL;
1476                 }
1477                 vfe_dev->vfe_ub_policy = *cfg_data;
1478                 break;
1479         }
1480         }
1481         return 0;
1482 }
1483
1484 int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
1485 {
1486         int rc = 0, i;
1487         struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
1488         struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
1489         uint32_t *cfg_data = NULL;
1490
1491         if (!proc_cmd->num_cfg
1492                 || proc_cmd->num_cfg > MAX_ISP_CMD_NUM) {
1493                 pr_err("%s: num_cfg outside allowed range\n",
1494                         __func__);
1495                 return -EINVAL;
1496         }
1497
1498         reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
1499                 proc_cmd->num_cfg, GFP_KERNEL);
1500         if (!reg_cfg_cmd) {
1501                 rc = -ENOMEM;
1502                 goto reg_cfg_failed;
1503         }
1504
1505         if (copy_from_user(reg_cfg_cmd,
1506                 (void __user *)(proc_cmd->cfg_cmd),
1507                 sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
1508                 rc = -EFAULT;
1509                 goto copy_cmd_failed;
1510         }
1511
1512         if (proc_cmd->cmd_len > 0) {
1513                 if (proc_cmd->cmd_len > MAX_ISP_CMD_LEN) {
1514                         pr_err("%s: cmd_len exceed max allowed\n", __func__);
1515                         rc = -EINVAL;
1516                         goto cfg_data_failed;
1517                 }
1518
1519                 cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
1520                 if (!cfg_data) {
1521                         rc = -ENOMEM;
1522                         goto cfg_data_failed;
1523                 }
1524
1525                 if (copy_from_user(cfg_data,
1526                         (void __user *)(proc_cmd->cfg_data),
1527                         proc_cmd->cmd_len)) {
1528                         rc = -EFAULT;
1529                         goto copy_cmd_failed;
1530                 }
1531         }
1532
1533         for (i = 0; i < proc_cmd->num_cfg; i++)
1534                 rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
1535                         cfg_data, proc_cmd->cmd_len);
1536
1537         if (copy_to_user(proc_cmd->cfg_data,
1538                         cfg_data, proc_cmd->cmd_len)) {
1539                 rc = -EFAULT;
1540                 goto copy_cmd_failed;
1541         }
1542
1543 copy_cmd_failed:
1544         kfree(cfg_data);
1545 cfg_data_failed:
1546         kfree(reg_cfg_cmd);
1547 reg_cfg_failed:
1548         return rc;
1549 }
1550
1551 int msm_isp_send_event(struct vfe_device *vfe_dev,
1552         uint32_t event_type,
1553         struct msm_isp_event_data *event_data)
1554 {
1555         struct v4l2_event isp_event;
1556
1557         memset(&isp_event, 0, sizeof(struct v4l2_event));
1558         isp_event.id = 0;
1559         isp_event.type = event_type;
1560
1561         memcpy(&isp_event.u.data[0], event_data,
1562                 sizeof(struct msm_isp_event_data));
1563         v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
1564         return 0;
1565 }
1566
1567 #define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
1568
/*
 * msm_isp_cal_word_per_line - Convert a line width in pixels to the
 * per-line word count the write master needs for a given fourcc format.
 *
 * Uses CAL_WORD(width, M, N) = ceil(width * M / N), where M/N encodes
 * the packed bytes-per-pixel ratio of each format family.
 *
 * Returns the word count, or -1 for an unrecognized format (after
 * logging via msm_isp_print_fourcc_error).
 */
int msm_isp_cal_word_per_line(uint32_t output_format,
	uint32_t pixel_per_line)
{
	int val = -1;

	switch (output_format) {
	/* 8bpp packed families: 1 byte per 1 pixel -> 1/8 words */
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_META:
		val = CAL_WORD(pixel_per_line, 1, 8);
		break;
	/* MIPI-packed 10-bit: 5 bytes per 4 pixels -> 5/32 words */
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_Y10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_META10:
		val = CAL_WORD(pixel_per_line, 5, 32);
		break;
	/* MIPI-packed 12-bit: 3 bytes per 2 pixels -> 3/16 words */
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_Y12:
		val = CAL_WORD(pixel_per_line, 3, 16);
		break;
	/* MIPI-packed 14-bit: 7 bytes per 4 pixels -> 7/32 words */
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
		val = CAL_WORD(pixel_per_line, 7, 32);
		break;
	/* QCOM-packed 10-bit */
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
		val = CAL_WORD(pixel_per_line, 1, 6);
		break;
	/* QCOM-packed 12-bit */
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
		val = CAL_WORD(pixel_per_line, 1, 5);
		break;
	/* QCOM-packed 14-bit */
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		val = CAL_WORD(pixel_per_line, 1, 4);
		break;
	/* YUV semi-planar / grey: 1 byte per pixel on the Y plane */
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV14:
	case V4L2_PIX_FMT_NV41:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_GREY:
		val = CAL_WORD(pixel_per_line, 1, 8);
		break;
	/* Interleaved YUV 4:2:2: 2 bytes per pixel */
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
		val = CAL_WORD(pixel_per_line, 2, 8);
	break;
	/* Plain16 10-bit: 2 bytes per pixel */
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
		val = CAL_WORD(pixel_per_line, 1, 4);
	break;
	case V4L2_PIX_FMT_NV24:
	case V4L2_PIX_FMT_NV42:
		val = CAL_WORD(pixel_per_line, 1, 8);
	break;
		/* TD: Add more image format */
	default:
		msm_isp_print_fourcc_error(__func__, output_format);
		break;
	}
	return val;
}
1666
/*
 * msm_isp_get_pack_format - Map a fourcc output format to the hardware
 * packing mode (MIPI, QCOM, or PLAIN16) programmed into the write master.
 *
 * Returns the pack format enum on success, or -EINVAL (after logging)
 * for an unrecognized fourcc.
 */
enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
{
	switch (output_format) {
	/* Standard Bayer/grey formats use MIPI packing */
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
	case V4L2_PIX_FMT_GREY:
	case V4L2_PIX_FMT_Y10:
	case V4L2_PIX_FMT_Y12:
		return MIPI;
	/* Q-prefixed fourccs select Qualcomm proprietary packing */
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		return QCOM;
	/* P16-prefixed fourccs: 10-bit samples in 16-bit words */
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
		return PLAIN16;
	default:
		msm_isp_print_fourcc_error(__func__, output_format);
		break;
	}
	return -EINVAL;
}
1726
/*
 * msm_isp_get_bit_per_pixel - Return the sample bit depth for a fourcc
 * output format (4..24 bits), used for buffer size / bandwidth math.
 *
 * For planar YUV formats this is the per-plane depth of the dominant
 * plane, not the average bits per pixel.
 *
 * Returns the bit depth, or -EINVAL (after logging) for an unknown
 * fourcc.
 */
int msm_isp_get_bit_per_pixel(uint32_t output_format)
{
	switch (output_format) {
	case V4L2_PIX_FMT_Y4:
		return 4;
	case V4L2_PIX_FMT_Y6:
		return 6;
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_QBGGR8:
	case V4L2_PIX_FMT_QGBRG8:
	case V4L2_PIX_FMT_QGRBG8:
	case V4L2_PIX_FMT_QRGGB8:
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_META:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV14:
	case V4L2_PIX_FMT_NV41:
	case V4L2_PIX_FMT_YVU410:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YYUV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_YUV411P:
	case V4L2_PIX_FMT_Y41P:
	case V4L2_PIX_FMT_YUV444:
	case V4L2_PIX_FMT_YUV555:
	case V4L2_PIX_FMT_YUV565:
	case V4L2_PIX_FMT_YUV32:
	case V4L2_PIX_FMT_YUV410:
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_GREY:
	case V4L2_PIX_FMT_PAL8:
	case V4L2_PIX_FMT_UV8:
	case MSM_V4L2_PIX_FMT_META:
		return 8;
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR10DPCM6:
	case V4L2_PIX_FMT_SGBRG10DPCM6:
	case V4L2_PIX_FMT_SGRBG10DPCM6:
	case V4L2_PIX_FMT_SRGGB10DPCM6:
	case V4L2_PIX_FMT_SBGGR10DPCM8:
	case V4L2_PIX_FMT_SGBRG10DPCM8:
	case V4L2_PIX_FMT_SGRBG10DPCM8:
	case V4L2_PIX_FMT_SRGGB10DPCM8:
	case V4L2_PIX_FMT_QBGGR10:
	case V4L2_PIX_FMT_QGBRG10:
	case V4L2_PIX_FMT_QGRBG10:
	case V4L2_PIX_FMT_QRGGB10:
	case V4L2_PIX_FMT_Y10:
	case V4L2_PIX_FMT_Y10BPACK:
	case V4L2_PIX_FMT_P16BGGR10:
	case V4L2_PIX_FMT_P16GBRG10:
	case V4L2_PIX_FMT_P16GRBG10:
	case V4L2_PIX_FMT_P16RGGB10:
	case V4L2_PIX_FMT_META10:
	case MSM_V4L2_PIX_FMT_META10:
		return 10;
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_QBGGR12:
	case V4L2_PIX_FMT_QGBRG12:
	case V4L2_PIX_FMT_QGRBG12:
	case V4L2_PIX_FMT_QRGGB12:
	case V4L2_PIX_FMT_Y12:
		return 12;
	case V4L2_PIX_FMT_SBGGR14:
	case V4L2_PIX_FMT_SGBRG14:
	case V4L2_PIX_FMT_SGRBG14:
	case V4L2_PIX_FMT_SRGGB14:
	case V4L2_PIX_FMT_QBGGR14:
	case V4L2_PIX_FMT_QGBRG14:
	case V4L2_PIX_FMT_QGRBG14:
	case V4L2_PIX_FMT_QRGGB14:
		return 14;
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_Y16:
		return 16;
	case V4L2_PIX_FMT_NV24:
	case V4L2_PIX_FMT_NV42:
		return 24;
		/* TD: Add more image format */
	default:
		/*
		 * NOTE(review): the unknown format is logged twice here
		 * (msm_isp_print_fourcc_error also prints) — candidate
		 * for cleanup.
		 */
		msm_isp_print_fourcc_error(__func__, output_format);
		pr_err("%s: Invalid output format %x\n",
			__func__, output_format);
		return -EINVAL;
	}
}
1828
1829 void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
1830 {
1831         struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1832
1833         error_info->info_dump_frame_count++;
1834 }
1835
1836
/*
 * msm_isp_process_iommu_page_fault - React to an SMMU page fault on this
 * VFE: halt the hardware and raise an error event, then (only on the
 * first fault since the flag was cleared) dump buffer-manager and
 * ping/pong state for debugging.
 *
 * Returns the pagefault_debug_disable value sampled on entry, i.e. 0 on
 * the first fault and 1 on repeats.
 */
static int msm_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
{
	/* Sample the flag before we set it, so the return value tells
	 * the caller whether this was the first fault.
	 */
	int rc = vfe_dev->buf_mgr->pagefault_debug_disable;

	pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %pK\n", __func__,
		__LINE__,  vfe_dev->pdev->id, vfe_dev);

	/* Halt first: stop the hardware before walking its state. */
	msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);

	/* Expensive debug dump only once; flag is reset elsewhere. */
	if (vfe_dev->buf_mgr->pagefault_debug_disable == 0) {
		vfe_dev->buf_mgr->pagefault_debug_disable = 1;
		vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
			vfe_dev->page_fault_addr);
		msm_isp_print_ping_pong_address(vfe_dev,
			vfe_dev->page_fault_addr);
		vfe_dev->hw_info->vfe_ops.axi_ops.
			read_wm_ping_pong_addr(vfe_dev);
	}
	return rc;
}
1857
1858 void msm_isp_process_error_info(struct vfe_device *vfe_dev)
1859 {
1860         struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
1861
1862         if (error_info->error_count == 1 ||
1863                 !(error_info->info_dump_frame_count % 100)) {
1864                 vfe_dev->hw_info->vfe_ops.core_ops.
1865                         process_error_status(vfe_dev);
1866                 error_info->error_mask0 = 0;
1867                 error_info->error_mask1 = 0;
1868                 error_info->camif_status = 0;
1869                 error_info->violation_status = 0;
1870         }
1871 }
1872
1873 static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
1874         uint32_t error_mask0, uint32_t error_mask1)
1875 {
1876         vfe_dev->error_info.error_mask0 |= error_mask0;
1877         vfe_dev->error_info.error_mask1 |= error_mask1;
1878         vfe_dev->error_info.error_count++;
1879 }
1880
/*
 * msm_isp_process_overflow_irq - Detect bus overflow and kick off
 * recovery.
 *
 * Called from the IRQ path with the freshly read status words. If an
 * overflow (or forced overflow) is seen, the overflow state machine is
 * moved to OVERFLOW_DETECTED, IRQ masks are narrowed to halt/restart
 * bits on this VFE (and its peer in dual-VFE mode), the passed status
 * words are zeroed so no further processing happens for this IRQ, and
 * an ISP_ERROR_BUS_OVERFLOW event is sent to userspace.
 *
 * @irq_status0/@irq_status1: in/out — may be masked or cleared here.
 * @force_overflow: nonzero to treat the overflow mask as asserted
 *                  regardless of irq_status1.
 */
void msm_isp_process_overflow_irq(
	struct vfe_device *vfe_dev,
	uint32_t *irq_status0, uint32_t *irq_status1,
	uint32_t force_overflow)
{
	uint32_t overflow_mask;

	/* if there are no active streams - do not start recovery */
	if (!vfe_dev->axi_data.num_active_stream)
		return;

	/* Mask out all other irqs if recovery is started */
	if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
		uint32_t halt_restart_mask0, halt_restart_mask1;

		vfe_dev->hw_info->vfe_ops.core_ops.
		get_halt_restart_mask(&halt_restart_mask0,
			&halt_restart_mask1);
		*irq_status0 &= halt_restart_mask0;
		*irq_status1 &= halt_restart_mask1;

		return;
	}

	/* Check if any overflow bit is set */
	vfe_dev->hw_info->vfe_ops.core_ops.
		get_overflow_mask(&overflow_mask);
	if (!force_overflow)
		overflow_mask &= *irq_status1;

	if (overflow_mask) {
		struct msm_isp_event_data error_event;

		if (vfe_dev->reset_pending == 1) {
			pr_err("%s:%d failed: overflow %x during reset\n",
				__func__, __LINE__, overflow_mask);
			/* Clear overflow bits since reset is pending */
			*irq_status1 &= ~overflow_mask;
			return;
		}

		ISP_DBG("%s: VFE%d Bus overflow detected: start recovery!\n",
			__func__, vfe_dev->pdev->id);


		/* mask off irqs for current vfe; save masks for restore
		 * after recovery completes
		 */
		atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
			NO_OVERFLOW, OVERFLOW_DETECTED);
		vfe_dev->recovery_irq0_mask = vfe_dev->irq0_mask;
		vfe_dev->recovery_irq1_mask = vfe_dev->irq1_mask;

		vfe_dev->hw_info->vfe_ops.core_ops.
			set_halt_restart_mask(vfe_dev);

		/* mask off other vfe if dual vfe is used */
		if (vfe_dev->is_split) {
			uint32_t other_vfe_id;
			struct vfe_device *other_vfe_dev;

			other_vfe_id = (vfe_dev->pdev->id == ISP_VFE0) ?
				ISP_VFE1 : ISP_VFE0;
			other_vfe_dev = vfe_dev->common_data->
				dual_vfe_res->vfe_dev[other_vfe_id];
			if (other_vfe_dev) {
				other_vfe_dev->recovery_irq0_mask =
					other_vfe_dev->irq0_mask;
				other_vfe_dev->recovery_irq1_mask =
					other_vfe_dev->irq1_mask;
			}

			/* move the peer's state machine along as well */
			atomic_cmpxchg(&(vfe_dev->common_data->dual_vfe_res->
				vfe_dev[other_vfe_id]->
				error_info.overflow_state),
				NO_OVERFLOW, OVERFLOW_DETECTED);

			vfe_dev->hw_info->vfe_ops.core_ops.
				set_halt_restart_mask(vfe_dev->common_data->
				dual_vfe_res->vfe_dev[other_vfe_id]);
		}

		/* reset irq status so skip further process */
		*irq_status0 = 0;
		*irq_status1 = 0;

		/* send overflow event as needed */
		if (atomic_read(&vfe_dev->error_info.overflow_state)
			!= HALT_ENFORCED) {
			memset(&error_event, 0, sizeof(error_event));
			error_event.frame_id =
				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
			error_event.u.error_info.err_type =
				ISP_ERROR_BUS_OVERFLOW;
			msm_isp_send_event(vfe_dev,
				ISP_EVENT_ERROR, &error_event);
		}
	}
}
1978
1979 void msm_isp_reset_burst_count_and_frame_drop(
1980         struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
1981 {
1982         if ((stream_info->state != ACTIVE) ||
1983                 (stream_info->stream_type != BURST_STREAM)) {
1984                 return;
1985         }
1986         if (stream_info->num_burst_capture != 0)
1987                 msm_isp_reset_framedrop(vfe_dev, stream_info);
1988 }
1989
/*
 * msm_isp_enqueue_tasklet_cmd - Queue one IRQ snapshot for deferred
 * processing and schedule the VFE tasklet.
 *
 * Uses a fixed-size ring (tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE]).
 * If the slot at taskletq_idx is still pending, that oldest pending
 * entry is unlinked and overwritten in place (events are dropped on
 * overflow) rather than growing the queue. Runs in IRQ context; all
 * queue state is protected by tasklet_lock.
 */
static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
	uint32_t irq_status0, uint32_t irq_status1,
	uint32_t ping_pong_status)
{
	unsigned long flags;
	struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;

	spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
	queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
	if (queue_cmd->cmd_used) {
		ISP_DBG("%s: Tasklet queue overflow: %d\n",
			__func__, vfe_dev->pdev->id);
		/* slot still pending: drop it and reuse (irq_cnt unchanged) */
		list_del(&queue_cmd->list);
	} else {
		atomic_add(1, &vfe_dev->irq_cnt);
	}
	queue_cmd->vfeInterruptStatus0 = irq_status0;
	queue_cmd->vfeInterruptStatus1 = irq_status1;
	queue_cmd->vfePingPongStatus = ping_pong_status;
	msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
	queue_cmd->cmd_used = 1;
	vfe_dev->taskletq_idx = (vfe_dev->taskletq_idx + 1) %
		MSM_VFE_TASKLETQ_SIZE;
	list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
	spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
	tasklet_hi_schedule(&vfe_dev->vfe_tasklet);
}
2017
/*
 * msm_isp_process_irq - Top-half VFE interrupt handler.
 *
 * Reads and clears the two IRQ status registers, handles EOF and
 * overflow inline (overflow may zero the status words to abort further
 * processing), splits error bits out into the error-info accumulator,
 * optionally records a dual-VFE debug snapshot, and defers everything
 * else to the tasklet via msm_isp_enqueue_tasklet_cmd().
 *
 * Always returns IRQ_HANDLED.
 */
irqreturn_t msm_isp_process_irq(int irq_num, void *data)
{
	struct vfe_device *vfe_dev = (struct vfe_device *) data;
	uint32_t irq_status0, irq_status1, ping_pong_status;
	uint32_t error_mask0, error_mask1;

	vfe_dev->hw_info->vfe_ops.irq_ops.
		read_irq_status_and_clear(vfe_dev, &irq_status0, &irq_status1);

	/* Spurious interrupt: nothing latched in either status word. */
	if ((irq_status0 == 0) && (irq_status1 == 0)) {
		pr_err("%s:VFE%d irq_status0 & 1 are both 0\n",
			__func__, vfe_dev->pdev->id);
		return IRQ_HANDLED;
	}

	/* Sample ping/pong now so the tasklet sees the state at IRQ time. */
	ping_pong_status = vfe_dev->hw_info->vfe_ops.axi_ops.
		get_pingpong_status(vfe_dev);
	if (vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq) {
		vfe_dev->hw_info->vfe_ops.irq_ops.process_eof_irq(vfe_dev,
			irq_status0);
	}
	/* May start recovery and narrow/zero the status words. */
	msm_isp_process_overflow_irq(vfe_dev,
		&irq_status0, &irq_status1, 0);

	/* Split error bits out of the normal processing path. */
	vfe_dev->hw_info->vfe_ops.core_ops.
		get_error_mask(&error_mask0, &error_mask1);
	error_mask0 &= irq_status0;
	error_mask1 &= irq_status1;
	irq_status0 &= ~error_mask0;
	irq_status1 &= ~error_mask1;
	if ((error_mask0 != 0) || (error_mask1 != 0))
		msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);

	/* Nothing left to defer unless this is the first error. */
	if ((irq_status0 == 0) && (irq_status1 == 0) &&
		(!(((error_mask0 != 0) || (error_mask1 != 0)) &&
		 vfe_dev->error_info.error_count == 1))) {
		ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
		return IRQ_HANDLED;
	}
	/* Dual-VFE debug trace: record both VFEs' status side by side. */
	dump_data.vfe_dev = (struct vfe_device *) data;
	if (vfe_dev->is_split &&
		(vfe_dev->common_data->dual_vfe_res->vfe_dev[
			!vfe_dev->pdev->id]) &&
		(vfe_dev->common_data->dual_vfe_res->vfe_dev[
			!vfe_dev->pdev->id]->vfe_open_cnt)) {
		spin_lock(&dump_irq_lock);
		dump_data.arr[dump_data.first].current_vfe_irq.
			vfe_id = vfe_dev->pdev->id;
		dump_data.arr[dump_data.first].current_vfe_irq.
			irq_status0 = irq_status0;
		dump_data.arr[dump_data.first].current_vfe_irq.
			irq_status1 = irq_status1;
		dump_data.arr[dump_data.first].current_vfe_irq.
			ping_pong_status = ping_pong_status;

		dump_data.arr[dump_data.first].other_vfe.
			vfe_id = (!vfe_dev->pdev->id);
		vfe_dev->hw_info->vfe_ops.irq_ops.
			read_irq_status(
			vfe_dev->common_data->dual_vfe_res->vfe_dev[
			!vfe_dev->pdev->id],
			&dump_data.arr[dump_data.first].other_vfe.irq_status0,
			&dump_data.arr[dump_data.first].other_vfe.irq_status1);
			dump_data.arr[dump_data.first].other_vfe.
		ping_pong_status =
			vfe_dev->hw_info->vfe_ops.axi_ops.
				get_pingpong_status(
				vfe_dev->common_data->dual_vfe_res->vfe_dev[
					!vfe_dev->pdev->id]);
		msm_isp_get_timestamp(&dump_data.arr[dump_data.first].
			other_vfe.ts, vfe_dev);
		dump_data.first =
			(dump_data.first + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
		dump_data.fill_count++;
		spin_unlock(&dump_irq_lock);
	}
	msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1,
					ping_pong_status);

	return IRQ_HANDLED;
}
2099
2100
/*
 * msm_isp_do_tasklet - Bottom half: drain the queued IRQ snapshots and
 * run the per-IRQ handlers (halt, reset, stats, axi, camif, reg-update,
 * sof, epoch) outside hard-IRQ context.
 *
 * tasklet_lock protects the queue and, per the original comment, the
 * halt-IRQ handling that touches shared registers; the remaining
 * handlers run with the lock released. When recovery is in progress
 * (overflow_state != NO_OVERFLOW) the non-reset handlers are skipped.
 */
void msm_isp_do_tasklet(unsigned long data)
{
	unsigned long flags;
	struct vfe_device *vfe_dev = (struct vfe_device *) data;
	struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
	struct msm_vfe_tasklet_queue_cmd *queue_cmd;
	struct msm_isp_timestamp ts;
	uint32_t irq_status0, irq_status1, pingpong_status;

	/* Device already closed: registers are gone, nothing to do. */
	if (vfe_dev->vfe_base == NULL || vfe_dev->vfe_open_cnt == 0) {
		ISP_DBG("%s: VFE%d open cnt = %d, device closed(base = %pK)\n",
			__func__, vfe_dev->pdev->id, vfe_dev->vfe_open_cnt,
			vfe_dev->vfe_base);
		return;
	}

	while (atomic_read(&vfe_dev->irq_cnt)) {
		spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
		queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
		struct msm_vfe_tasklet_queue_cmd, list);

		/*
		 * NOTE(review): list_first_entry() never yields NULL even
		 * on an empty list, so this guard looks ineffective —
		 * confirm against list_first_entry_or_null() semantics.
		 */
		if (!queue_cmd) {
			atomic_set(&vfe_dev->irq_cnt, 0);
			spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
			return;
		}
		atomic_sub(1, &vfe_dev->irq_cnt);
		list_del(&queue_cmd->list);

		if (!vfe_dev->clk_enabled) {
			/* client closed, delayed task should exit directly */
			spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
			return;
		}

		/* Copy the snapshot out before releasing the slot. */
		queue_cmd->cmd_used = 0;
		irq_status0 = queue_cmd->vfeInterruptStatus0;
		irq_status1 = queue_cmd->vfeInterruptStatus1;
		pingpong_status = queue_cmd->vfePingPongStatus;
		ts = queue_cmd->ts;
		/* related to rw reg, need to be protected */
		irq_ops->process_halt_irq(vfe_dev,
			irq_status0, irq_status1);
		spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
		ISP_DBG("%s: vfe_id %d status0: 0x%x status1: 0x%x\n",
			__func__, vfe_dev->pdev->id, irq_status0, irq_status1);
		/* Dual-VFE debug trace of what the tasklet processed. */
		if (vfe_dev->is_split) {
			spin_lock(&dump_tasklet_lock);
			tasklet_data.arr[tasklet_data.first].
			current_vfe_irq.vfe_id = vfe_dev->pdev->id;
			tasklet_data.arr[tasklet_data.first].
			current_vfe_irq.core = smp_processor_id();
			tasklet_data.arr[tasklet_data.first].
			current_vfe_irq.irq_status0 = irq_status0;
			tasklet_data.arr[tasklet_data.first].
			current_vfe_irq.irq_status1 = irq_status1;
			tasklet_data.arr[tasklet_data.first].
			current_vfe_irq.ping_pong_status = pingpong_status;
			tasklet_data.arr[tasklet_data.first].
			current_vfe_irq.ts = ts;
			tasklet_data.first =
			(tasklet_data.first + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
			spin_unlock(&dump_tasklet_lock);
		}
		irq_ops->process_reset_irq(vfe_dev,
			irq_status0, irq_status1);
		if (atomic_read(&vfe_dev->error_info.overflow_state)
			!= NO_OVERFLOW) {
			ISP_DBG("%s: Recovery in processing, Ignore IRQs!!!\n",
				__func__);
			continue;
		}
		msm_isp_process_error_info(vfe_dev);
		irq_ops->process_stats_irq(vfe_dev,
			irq_status0, irq_status1,
			pingpong_status, &ts);
		irq_ops->process_axi_irq(vfe_dev,
			irq_status0, irq_status1,
			pingpong_status, &ts);
		irq_ops->process_camif_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_reg_update(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_sof_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
		irq_ops->process_epoch_irq(vfe_dev,
			irq_status0, irq_status1, &ts);
	}
}
2190
2191 int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
2192 {
2193         struct msm_vfe_axi_src_state *src_state = arg;
2194
2195         if (src_state->input_src >= VFE_SRC_MAX)
2196                 return -EINVAL;
2197         vfe_dev->axi_data.src_info[src_state->input_src].active =
2198         src_state->src_active;
2199         vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
2200         src_state->src_frame_id;
2201         return 0;
2202 }
2203
2204 static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
2205         struct device *dev, unsigned long iova, int flags, void *token)
2206 {
2207         struct vfe_device *vfe_dev = NULL;
2208
2209         if (token) {
2210                 vfe_dev = (struct vfe_device *)token;
2211                 vfe_dev->page_fault_addr = iova;
2212                 if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops ||
2213                         !vfe_dev->axi_data.num_active_stream) {
2214                         pr_err("%s:%d buf_mgr %pK active strms %d\n", __func__,
2215                                 __LINE__, vfe_dev->buf_mgr,
2216                                 vfe_dev->axi_data.num_active_stream);
2217                         goto end;
2218                 }
2219
2220                 mutex_lock(&vfe_dev->core_mutex);
2221                 if (vfe_dev->vfe_open_cnt > 0) {
2222                         atomic_set(&vfe_dev->error_info.overflow_state,
2223                                 HALT_ENFORCED);
2224                         msm_isp_process_iommu_page_fault(vfe_dev);
2225                 } else {
2226                         pr_err("%s: no handling, vfe open cnt = %d\n",
2227                                 __func__, vfe_dev->vfe_open_cnt);
2228                 }
2229                 mutex_unlock(&vfe_dev->core_mutex);
2230         } else {
2231                 ISP_DBG("%s:%d] no token received: %pK\n",
2232                         __func__, __LINE__, token);
2233                 goto end;
2234         }
2235 end:
2236         return;
2237 }
2238
2239 int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
2240 {
2241         struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
2242         long rc = 0;
2243         enum cam_ahb_clk_client id;
2244
2245         ISP_DBG("%s open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
2246
2247         if (vfe_dev->common_data == NULL ||
2248                 vfe_dev->common_data->dual_vfe_res == NULL) {
2249                 pr_err("%s: Error in probe. No common_data or dual vfe res\n",
2250                         __func__);
2251                 return -EINVAL;
2252         }
2253
2254         if (vfe_dev->pdev->id == ISP_VFE0)
2255                 vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;
2256
2257         mutex_lock(&vfe_dev->realtime_mutex);
2258         mutex_lock(&vfe_dev->core_mutex);
2259
2260         if (vfe_dev->vfe_open_cnt++) {
2261                 mutex_unlock(&vfe_dev->core_mutex);
2262                 mutex_unlock(&vfe_dev->realtime_mutex);
2263                 return 0;
2264         }
2265
2266         vfe_dev->reset_pending = 0;
2267         vfe_dev->isp_sof_debug = 0;
2268         vfe_dev->isp_raw0_debug = 0;
2269         vfe_dev->isp_raw1_debug = 0;
2270         vfe_dev->isp_raw2_debug = 0;
2271
2272         if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
2273                 pr_err("%s: init hardware failed\n", __func__);
2274                 vfe_dev->vfe_open_cnt--;
2275                 mutex_unlock(&vfe_dev->core_mutex);
2276                 mutex_unlock(&vfe_dev->realtime_mutex);
2277                 return -EBUSY;
2278         }
2279
2280         memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
2281         atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
2282
2283         vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);
2284
2285         vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
2286         ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
2287         rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
2288         if (rc <= 0) {
2289                 pr_err("%s: reset timeout\n", __func__);
2290                 vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
2291                 vfe_dev->vfe_open_cnt--;
2292                 mutex_unlock(&vfe_dev->core_mutex);
2293                 mutex_unlock(&vfe_dev->realtime_mutex);
2294                 return -EINVAL;
2295         }
2296
2297         vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
2298
2299         vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr,
2300                 "msm_isp");
2301
2302         memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
2303         memset(&vfe_dev->stats_data, 0,
2304                 sizeof(struct msm_vfe_stats_shared_data));
2305         memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
2306         memset(&vfe_dev->fetch_engine_info, 0,
2307                 sizeof(vfe_dev->fetch_engine_info));
2308         vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
2309         vfe_dev->axi_data.enable_frameid_recovery = 0;
2310         vfe_dev->taskletq_idx = 0;
2311         vfe_dev->vt_enable = 0;
2312         vfe_dev->reg_update_requested = 0;
2313         /* Register page fault handler */
2314         vfe_dev->buf_mgr->pagefault_debug_disable = 0;
2315         cam_smmu_reg_client_page_fault_handler(
2316                         vfe_dev->buf_mgr->iommu_hdl,
2317                         msm_vfe_iommu_fault_handler, vfe_dev);
2318
2319         /* Disable vfe clks and allow device to go XO shutdown mode */
2320         if (vfe_dev->pdev->id == 0)
2321                 id = CAM_AHB_CLIENT_VFE0;
2322         else
2323                 id = CAM_AHB_CLIENT_VFE1;
2324         if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
2325                 pr_err("%s: failed to remove vote for AHB\n", __func__);
2326         vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
2327         vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
2328
2329         mutex_unlock(&vfe_dev->core_mutex);
2330         mutex_unlock(&vfe_dev->realtime_mutex);
2331         return 0;
2332 }
2333
#ifdef CONFIG_MSM_AVTIMER
/* Tear down AV-timer usage: re-allow power collapse on the AVCS core.
 * Presumably the counterpart of an enable done when vt_enable was set —
 * TODO(review): confirm against the vt setup path (not in this file view).
 */
static void msm_isp_end_avtimer(void)
{
	avcs_core_disable_power_collapse(0);
}
#else
/* Stub used when the AV timer service is not compiled into the kernel. */
static void msm_isp_end_avtimer(void)
{
	pr_err("AV Timer is not supported\n");
}
#endif
2345
2346 int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
2347 {
2348         long rc = 0;
2349         int wm;
2350         struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
2351         enum cam_ahb_clk_client id;
2352
2353         ISP_DBG("%s E open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
2354         mutex_lock(&vfe_dev->realtime_mutex);
2355         mutex_lock(&vfe_dev->core_mutex);
2356
2357         /* Enable vfe clks to wake up from XO shutdown mode */
2358         if (vfe_dev->pdev->id == 0)
2359                 id = CAM_AHB_CLIENT_VFE0;
2360         else
2361                 id = CAM_AHB_CLIENT_VFE1;
2362         if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE) < 0)
2363                 pr_err("%s: failed to vote for AHB\n", __func__);
2364         vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 1);
2365         vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 1);
2366
2367         if (!vfe_dev->vfe_open_cnt) {
2368                 pr_err("%s invalid state open cnt %d\n", __func__,
2369                         vfe_dev->vfe_open_cnt);
2370                 mutex_unlock(&vfe_dev->core_mutex);
2371                 mutex_unlock(&vfe_dev->realtime_mutex);
2372                 return -EINVAL;
2373         }
2374
2375         if (vfe_dev->vfe_open_cnt > 1) {
2376                 vfe_dev->vfe_open_cnt--;
2377                 mutex_unlock(&vfe_dev->core_mutex);
2378                 mutex_unlock(&vfe_dev->realtime_mutex);
2379                 return 0;
2380         }
2381         /* Unregister page fault handler */
2382         cam_smmu_reg_client_page_fault_handler(
2383                 vfe_dev->buf_mgr->iommu_hdl,
2384                 NULL, vfe_dev);
2385
2386         rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
2387         if (rc <= 0)
2388                 pr_err("%s: halt timeout rc=%ld\n", __func__, rc);
2389
2390         vfe_dev->hw_info->vfe_ops.core_ops.
2391                 update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
2392         vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 0);
2393
2394         /* after regular hw stop, reduce open cnt */
2395         vfe_dev->vfe_open_cnt--;
2396
2397         /* put scratch buf in all the wm */
2398         for (wm = 0; wm < vfe_dev->axi_data.hw_info->num_wm; wm++) {
2399                 msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PING_FLAG);
2400                 msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PONG_FLAG);
2401         }
2402         vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
2403         vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
2404         if (vfe_dev->vt_enable) {
2405                 msm_isp_end_avtimer();
2406                 vfe_dev->vt_enable = 0;
2407         }
2408         vfe_dev->is_split = 0;
2409
2410         mutex_unlock(&vfe_dev->core_mutex);
2411         mutex_unlock(&vfe_dev->realtime_mutex);
2412         return 0;
2413 }
2414
2415 void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
2416 {
2417         unsigned long flags;
2418         struct msm_vfe_tasklet_queue_cmd *queue_cmd;
2419
2420         spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
2421         while (atomic_read(&vfe_dev->irq_cnt)) {
2422                 queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
2423                 struct msm_vfe_tasklet_queue_cmd, list);
2424
2425                 if (!queue_cmd) {
2426                         atomic_set(&vfe_dev->irq_cnt, 0);
2427                         break;
2428                 }
2429                 atomic_sub(1, &vfe_dev->irq_cnt);
2430                 list_del(&queue_cmd->list);
2431                 queue_cmd->cmd_used = 0;
2432         }
2433         spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
2434 }
2435
2436 void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
2437                                 enum msm_vfe_input_src frame_src)
2438 {
2439         struct msm_vfe_axi_stream *stream_info = NULL;
2440         uint32_t j = 0;
2441         unsigned long flags;
2442
2443         for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
2444                 stream_info = &vfe_dev->axi_data.stream_info[j];
2445                 if (stream_info->state != ACTIVE)
2446                         continue;
2447                 if (frame_src != SRC_TO_INTF(stream_info->stream_src))
2448                         continue;
2449
2450                 stream_info =
2451                         &vfe_dev->axi_data.stream_info[j];
2452                 spin_lock_irqsave(&stream_info->lock, flags);
2453                 stream_info->activated_framedrop_period  =
2454                         stream_info->requested_framedrop_period;
2455                 spin_unlock_irqrestore(&stream_info->lock, flags);
2456         }
2457 }
2458
2459 static void msm_isp_dump_irq_debug(void)
2460 {
2461         uint32_t index, count, i;
2462
2463         if (dump_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
2464                 index = dump_data.first;
2465                 count = MAX_ISP_PING_PONG_DUMP_SIZE;
2466         } else {
2467                 index = 0;
2468                 count = dump_data.first;
2469         }
2470         for (i = 0; i < count; i++) {
2471                 pr_err("%s: trace_msm_cam_ping_pong_debug_dump need re-impl\n",
2472                         __func__);
2473                 index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
2474         }
2475 }
2476
2477 void msm_isp_dump_taskelet_debug(void)
2478 {
2479         uint32_t index, count, i;
2480
2481         if (tasklet_data.fill_count > MAX_ISP_PING_PONG_DUMP_SIZE) {
2482                 index = tasklet_data.first;
2483                 count = MAX_ISP_PING_PONG_DUMP_SIZE;
2484         } else {
2485                 index = 0;
2486                 count = tasklet_data.first;
2487         }
2488         for (i = 0; i < count; i++) {
2489                 pr_err("%s: trace_msm_cam_tasklet_debug_dump need implement\n",
2490                         __func__);
2491                 index = (index + 1) % MAX_ISP_PING_PONG_DUMP_SIZE;
2492         }
2493 }
2494
2495 void msm_isp_dump_ping_pong_mismatch(void)
2496 {
2497         int i;
2498
2499         spin_lock(&dump_tasklet_lock);
2500         for (i = 0; i < MAX_VFE; i++) {
2501                 dump_data.vfe_dev->hw_info->vfe_ops.axi_ops.
2502                         clear_irq_mask(
2503                 dump_data.vfe_dev->common_data->dual_vfe_res->vfe_dev[i]);
2504                 synchronize_irq(
2505                 (uint32_t)dump_data.vfe_dev->common_data->dual_vfe_res->vfe_dev[
2506                         i]->vfe_irq->start);
2507         }
2508         trace_msm_cam_string(" ***** msm_isp_dump_irq_debug ****");
2509         msm_isp_dump_irq_debug();
2510         trace_msm_cam_string(" ***** msm_isp_dump_taskelet_debug ****");
2511         msm_isp_dump_taskelet_debug();
2512         spin_unlock(&dump_tasklet_lock);
2513 }