1 /* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
13 #define pr_fmt(fmt) "MSM-CPP %s:%d " fmt, __func__, __LINE__
16 #include <linux/delay.h>
17 #include <linux/firmware.h>
20 #include <linux/module.h>
21 #include <linux/ion.h>
22 #include <linux/proc_fs.h>
23 #include <linux/msm_ion.h>
24 #include <linux/iommu.h>
25 #include <linux/timer.h>
26 #include <linux/kernel.h>
27 #include <linux/workqueue.h>
28 #include <linux/clk/msm-clk.h>
29 #include <media/v4l2-event.h>
30 #include <media/v4l2-ioctl.h>
31 #include <media/msmb_camera.h>
32 #include <media/msmb_generic_buf_mgr.h>
33 #include <media/msmb_pproc.h>
35 #include "msm_isp_util.h"
36 #include "msm_camera_io_util.h"
37 #include <linux/debugfs.h>
38 #include "cam_smmu_api.h"
40 #define MSM_CPP_DRV_NAME "msm_cpp"
42 #define MSM_CPP_MAX_BUFF_QUEUE 16
44 #define CONFIG_MSM_CPP_DBG 0
46 #define ENABLE_CPP_LOW 0
48 #define CPP_CMD_TIMEOUT_MS 300
49 #define MSM_CPP_INVALID_OFFSET 0x00000000
50 #define MSM_CPP_NOMINAL_CLOCK 266670000
51 #define MSM_CPP_TURBO_CLOCK 320000000
53 #define CPP_FW_VERSION_1_2_0 0x10020000
54 #define CPP_FW_VERSION_1_4_0 0x10040000
55 #define CPP_FW_VERSION_1_6_0 0x10060000
56 #define CPP_FW_VERSION_1_8_0 0x10080000
57 #define CPP_FW_VERSION_1_10_0 0x10100000
59 /* dump the frame command before writing to the hardware */
60 #define MSM_CPP_DUMP_FRM_CMD 0
62 #define CPP_CLK_INFO_MAX 16
64 #define MSM_CPP_IRQ_MASK_VAL 0x7c8
66 #define CPP_GDSCR_SW_COLLAPSE_ENABLE 0xFFFFFFFE
67 #define CPP_GDSCR_SW_COLLAPSE_DISABLE 0xFFFFFFFD
68 #define CPP_GDSCR_HW_CONTROL_ENABLE 0x2
69 #define CPP_GDSCR_HW_CONTROL_DISABLE 0x1
70 #define PAYLOAD_NUM_PLANES 3
72 #define UBWC_MASK 0x20
74 #define MMU_PF_MASK 0x80
77 #define BATCH_DUP_MASK 0x100
/* True when the frame is a preview batch with duplicate output enabled. */
79 #define IS_BATCH_BUFFER_ON_PREVIEW(new_frame) \
80 (((new_frame->batch_info.batch_mode == BATCH_MODE_PREVIEW) && \
81 new_frame->duplicate_output) ? 1 : 0)
/*
 * Swap identity for preview-batch frames; the else-branch of this macro is
 * elided in this listing -- see the original file for the full expansion.
 */
83 #define SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(new_frame, iden, swap_iden) { \
84 if (IS_BATCH_BUFFER_ON_PREVIEW(new_frame)) \
/* Pick the buffer-manager index: swap_index for preview batch, else cur. */
88 #define SWAP_BUF_INDEX_FOR_BATCH_ON_PREVIEW(new_frame, buff_mgr_info, \
89 cur_index, swap_index) { \
90 if (IS_BATCH_BUFFER_ON_PREVIEW(new_frame)) \
91 buff_mgr_info.index = swap_index; \
93 buff_mgr_info.index = cur_index; \
/*
 * 0xFFFFFFFF means userspace supplied no output-buffer index; use the
 * last buffer from the queue instead.
 */
97 * Default value for get buf to be used - 0xFFFFFFFF
99 * no valid index from userspace, use last buffer from queue.
101 #define DEFAULT_OUTPUT_BUF_INDEX 0xFFFFFFFF
102 #define IS_DEFAULT_OUTPUT_BUF_INDEX(index) \
103 ((index == DEFAULT_OUTPUT_BUF_INDEX) ? 1 : 0)
105 static struct msm_cpp_vbif_data cpp_vbif;
106 static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
107 uint32_t buff_mgr_ops, uint32_t ids, void *arg);
109 static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
110 struct msm_queue_cmd *frame_qcmd);
111 static int msm_cpp_send_command_to_hardware(struct cpp_device *cpp_dev,
112 uint32_t *cmd_msg, uint32_t payload_size);
114 static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
116 static int msm_cpp_buffer_private_ops(struct cpp_device *cpp_dev,
117 uint32_t buff_mgr_ops, uint32_t id, void *arg);
118 static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev,
119 uint8_t enable, uint32_t irq_mask);
120 static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
122 static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info);
123 static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
124 struct msm_cpp_frame_info_t *frame_info);
125 static int32_t msm_cpp_reset_vbif_and_load_fw(struct cpp_device *cpp_dev);
127 #if CONFIG_MSM_CPP_DBG
128 #define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
130 #define CPP_DBG(fmt, args...) pr_debug(fmt, ##args)
133 #define CPP_LOW(fmt, args...) do { \
134 if (ENABLE_CPP_LOW) \
135 pr_info(fmt, ##args); \
138 #define ERR_USER_COPY(to) pr_err("copy %s user\n", \
139 ((to) ? "to" : "from"))
140 #define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
/*
 * Pop one msm_queue_cmd from @queue under the queue spinlock.
 * pop_dir non-zero pops from the head (FIFO), zero pops from the tail
 * (LIFO).  Evaluates to the dequeued command, or NULL (0) when the list
 * is empty.  Statement-expression form; safe in IRQ context because it
 * uses spin_lock_irqsave.
 */
142 #define msm_dequeue(queue, member, pop_dir) ({ \
143 unsigned long flags; \
144 struct msm_device_queue *__q = (queue); \
145 struct msm_queue_cmd *qcmd = 0; \
146 spin_lock_irqsave(&__q->lock, flags); \
147 if (!list_empty(&__q->list)) { \
149 qcmd = pop_dir ? list_first_entry(&__q->list, \
150 struct msm_queue_cmd, member) : \
151 list_last_entry(&__q->list, \
152 struct msm_queue_cmd, member); \
153 list_del_init(&qcmd->member); \
155 spin_unlock_irqrestore(&__q->lock, flags); \
159 #define MSM_CPP_MAX_TIMEOUT_TRIAL 1
/*
 * Per-timer bookkeeping: the owning device plus the frames currently in
 * flight on the hardware, guarded by processed_frame_lock.
 */
161 struct msm_cpp_timer_data_t {
162 struct cpp_device *cpp_dev;
163 struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
164 spinlock_t processed_frame_lock;
/* Watchdog wrapper: frame data + the kernel timer that detects stalls. */
167 struct msm_cpp_timer_t {
169 struct msm_cpp_timer_data_t data;
170 struct timer_list cpp_timer;
/* Single global CPP watchdog instance shared by IRQ/tasklet/timer paths. */
173 struct msm_cpp_timer_t cpp_timer;
174 static void msm_cpp_set_vbif_reg_values(struct cpp_device *cpp_dev);
/*
 * Register (or, with a NULL handler, unregister) a VBIF error callback
 * for @client in the global cpp_vbif table.  Rejects a NULL @dev or an
 * out-of-range client id.  Not static: presumably called from a sibling
 * VBIF module -- confirm against the rest of the driver.
 */
177 void msm_cpp_vbif_register_error_handler(void *dev,
178 enum cpp_vbif_client client,
179 int (*client_vbif_error_handler)(void *, uint32_t))
181 if (dev == NULL || client >= VBIF_CLIENT_MAX) {
182 pr_err("%s: Fail to register handler! dev = %p, client %d\n",
183 __func__, dev, client);
187 if (client_vbif_error_handler != NULL) {
188 cpp_vbif.dev[client] = dev;
189 cpp_vbif.err_handler[client] = client_vbif_error_handler;
191 /* if handler = NULL, is unregister case */
192 cpp_vbif.dev[client] = NULL;
193 cpp_vbif.err_handler[client] = NULL;
/*
 * Register the CPP bus client and apply an initial zero bandwidth vote.
 * On a failed vote the client registration is rolled back.  Returns 0 on
 * success or the negative error from the bus-scale helpers.
 */
196 static int msm_cpp_init_bandwidth_mgr(struct cpp_device *cpp_dev)
200 rc = msm_camera_register_bus_client(cpp_dev->pdev, CAM_BUS_CLIENT_CPP);
202 pr_err("Fail to register bus client\n");
206 rc = msm_camera_update_bus_bw(CAM_BUS_CLIENT_CPP, 0, 0);
208 msm_camera_unregister_bus_client(CAM_BUS_CLIENT_CPP);
209 pr_err("Fail bus scale update %d\n", rc);
/*
 * Vote new average (@ab) and instantaneous (@ib) bandwidth for the CPP
 * bus client.  Thin wrapper over msm_camera_update_bus_bw.
 */
216 static int msm_cpp_update_bandwidth(struct cpp_device *cpp_dev,
217 uint64_t ab, uint64_t ib)
222 rc = msm_camera_update_bus_bw(CAM_BUS_CLIENT_CPP, ab, ib);
224 pr_err("Fail bus scale update %d\n", rc);
/* Unregister the CPP bus client; failure is only logged, not returned. */
231 void msm_cpp_deinit_bandwidth_mgr(struct cpp_device *cpp_dev)
235 rc = msm_camera_unregister_bus_client(CAM_BUS_CLIENT_CPP);
237 pr_err("Failed to unregister %d\n", rc);
/*
 * Route a bandwidth vote either through the CPP's own bus client (when
 * this device is bus master) or through the ISP helper on its behalf.
 */
242 static int msm_cpp_update_bandwidth_setting(struct cpp_device *cpp_dev,
243 uint64_t ab, uint64_t ib) {
246 if (cpp_dev->bus_master_flag)
247 rc = msm_cpp_update_bandwidth(cpp_dev, ab, ib);
249 rc = msm_isp_update_bandwidth(ISP_CPP, ab, ib);
/* Initialise a device queue: spinlock, empty list, and waitqueue. */
253 static void msm_queue_init(struct msm_device_queue *queue, const char *name)
256 spin_lock_init(&queue->lock);
260 INIT_LIST_HEAD(&queue->list);
261 init_waitqueue_head(&queue->wait);
/*
 * Append @entry to @queue under the queue lock, track the high-water
 * mark in queue->max, and wake any waiter on queue->wait.
 */
264 static void msm_enqueue(struct msm_device_queue *queue,
265 struct list_head *entry)
269 spin_lock_irqsave(&queue->lock, flags);
271 if (queue->len > queue->max) {
272 queue->max = queue->len;
273 pr_debug("queue %s new max is %d\n", queue->name, queue->max);
275 list_add_tail(entry, &queue->list);
276 wake_up(&queue->wait);
277 CPP_DBG("woke up %s\n", queue->name);
278 spin_unlock_irqrestore(&queue->lock, flags);
/*
 * Drain every msm_queue_cmd from @queue under its spinlock, unlinking
 * each entry via list_del_init.  Freeing of the dequeued commands (if
 * any) happens in the elided body lines of this listing.
 */
281 #define msm_cpp_empty_list(queue, member) { \
282 unsigned long flags; \
283 struct msm_queue_cmd *qcmd = NULL; \
285 spin_lock_irqsave(&queue->lock, flags); \
286 while (!list_empty(&queue->list)) { \
288 qcmd = list_first_entry(&queue->list, \
289 struct msm_queue_cmd, member); \
290 list_del_init(&qcmd->member); \
293 spin_unlock_irqrestore(&queue->lock, flags); \
299 static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev,
301 static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin);
302 static void cpp_timer_callback(unsigned long data);
304 uint8_t induce_error;
305 static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev);
/* Push one 32-bit word into the microcontroller RX FIFO. */
307 static void msm_cpp_write(u32 data, void __iomem *cpp_base)
309 msm_camera_io_w((data), cpp_base + MSM_CPP_MICRO_FIFO_RX_DATA);
/*
 * Stop the CPP watchdog if armed: clear the 'used' flag, delete the
 * timer, drop all tracked in-flight frame pointers, and reset the
 * timeout retry counter.
 */
312 static void msm_cpp_clear_timer(struct cpp_device *cpp_dev)
316 if (atomic_read(&cpp_timer.used)) {
317 atomic_set(&cpp_timer.used, 0);
318 del_timer(&cpp_timer.cpp_timer);
319 for (i = 0; i < MAX_CPP_PROCESSING_FRAME; i++)
320 cpp_timer.data.processed_frame[i] = NULL;
321 cpp_dev->timeout_trial_cnt = 0;
/*
 * Called on frame completion.  With <= 1 frame left in processing_q the
 * watchdog is cleared entirely; otherwise the processed_frame array is
 * shifted down one slot (under processed_frame_lock) and the timer is
 * re-armed for the next outstanding frame.
 */
325 static void msm_cpp_timer_queue_update(struct cpp_device *cpp_dev)
330 CPP_DBG("Frame done qlen %d\n", cpp_dev->processing_q.len);
331 if (cpp_dev->processing_q.len <= 1) {
332 msm_cpp_clear_timer(cpp_dev);
334 spin_lock_irqsave(&cpp_timer.data.processed_frame_lock, flags);
335 for (i = 0; i < cpp_dev->processing_q.len - 1; i++)
336 cpp_timer.data.processed_frame[i] =
337 cpp_timer.data.processed_frame[i + 1];
338 cpp_timer.data.processed_frame[i] = NULL;
339 cpp_dev->timeout_trial_cnt = 0;
340 spin_unlock_irqrestore(&cpp_timer.data.processed_frame_lock,
343 mod_timer(&cpp_timer.cpp_timer,
344 jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
/*
 * Read one word from the microcontroller TX FIFO.  Polls the TX status
 * register (bit 0x2 = data available) for up to 10 retries before
 * reading; logs failure otherwise.  Return value on failure comes from
 * lines elided in this listing.
 */
348 static uint32_t msm_cpp_read(void __iomem *cpp_base)
350 uint32_t tmp, retry = 0;
353 tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_TX_STAT);
354 } while (((tmp & 0x2) == 0x0) && (retry++ < 10));
356 tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_TX_DATA);
357 CPP_DBG("Read data: 0%x\n", tmp);
359 CPP_DBG("Read failed\n");
/*
 * Linear search of cpp_dev->buff_queue for the in-use entry matching
 * (session_id, stream_id).  Returns the entry or NULL when not found.
 */
366 static struct msm_cpp_buff_queue_info_t *msm_cpp_get_buff_queue_entry(
367 struct cpp_device *cpp_dev, uint32_t session_id, uint32_t stream_id)
370 struct msm_cpp_buff_queue_info_t *buff_queue_info = NULL;
372 for (i = 0; i < cpp_dev->num_buffq; i++) {
373 if ((cpp_dev->buff_queue[i].used == 1) &&
374 (cpp_dev->buff_queue[i].session_id == session_id) &&
375 (cpp_dev->buff_queue[i].stream_id == stream_id)) {
376 buff_queue_info = &cpp_dev->buff_queue[i];
381 if (buff_queue_info == NULL) {
382 CPP_DBG("buffer queue entry for sess:%d strm:%d not found\n",
383 session_id, stream_id);
385 return buff_queue_info;
/*
 * Look up an already-mapped buffer by index on either the native or the
 * vb2 list of @buff_queue_info and return its IOVA, also writing the
 * backing ion fd through @fd.  Returns 0 when the index is not mapped.
 */
388 static unsigned long msm_cpp_get_phy_addr(struct cpp_device *cpp_dev,
389 struct msm_cpp_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
390 uint8_t native_buff, int32_t *fd)
392 unsigned long phy_add = 0;
393 struct list_head *buff_head;
394 struct msm_cpp_buffer_map_list_t *buff, *save;
397 buff_head = &buff_queue_info->native_buff_head;
399 buff_head = &buff_queue_info->vb2_buff_head;
401 list_for_each_entry_safe(buff, save, buff_head, entry) {
402 if (buff->map_info.buff_info.index == buff_index) {
403 phy_add = buff->map_info.phy_addr;
404 *fd = buff->map_info.buff_info.fd;
/*
 * Map a new buffer into the CPP SMMU context and add it to the queue's
 * native or vb2 map list.  Rejects (with diagnostics) an index that is
 * already queued.  Secure streams map through the stage-2 SMMU path.
 * Returns the mapped IOVA; the failure return value sits on lines
 * elided from this listing.
 */
412 static unsigned long msm_cpp_queue_buffer_info(struct cpp_device *cpp_dev,
413 struct msm_cpp_buff_queue_info_t *buff_queue,
414 struct msm_cpp_buffer_info_t *buffer_info)
416 struct list_head *buff_head;
417 struct msm_cpp_buffer_map_list_t *buff, *save;
420 if (buffer_info->native_buff)
421 buff_head = &buff_queue->native_buff_head;
423 buff_head = &buff_queue->vb2_buff_head;
425 list_for_each_entry_safe(buff, save, buff_head, entry) {
426 if (buff->map_info.buff_info.index == buffer_info->index) {
427 pr_err("error buf index already queued\n");
428 pr_err("error buf, fd %d idx %d native %d ssid %d %d\n",
429 buffer_info->fd, buffer_info->index,
430 buffer_info->native_buff,
431 buff_queue->session_id,
432 buff_queue->stream_id);
433 pr_err("existing buf,fd %d idx %d native %d id %x\n",
434 buff->map_info.buff_info.fd,
435 buff->map_info.buff_info.index,
436 buff->map_info.buff_info.native_buff,
437 buff->map_info.buff_info.identity);
/* Allocation target (kzalloc call start elided in this listing). */
443 sizeof(struct msm_cpp_buffer_map_list_t), GFP_KERNEL);
445 pr_err("error allocating memory\n");
449 buff->map_info.buff_info = *buffer_info;
450 buff->map_info.buf_fd = buffer_info->fd;
452 trace_printk("fd %d index %d native_buff %d ssid %d %d\n",
453 buffer_info->fd, buffer_info->index,
454 buffer_info->native_buff, buff_queue->session_id,
455 buff_queue->stream_id);
457 if (buff_queue->security_mode == SECURE_MODE)
458 rc = cam_smmu_get_stage2_phy_addr(cpp_dev->iommu_hdl,
459 buffer_info->fd, CAM_SMMU_MAP_RW,
460 cpp_dev->ion_client, &buff->map_info.phy_addr,
461 (size_t *)&buff->map_info.len);
463 rc = cam_smmu_get_phy_addr(cpp_dev->iommu_hdl,
464 buffer_info->fd, CAM_SMMU_MAP_RW,
465 &buff->map_info.phy_addr,
466 (size_t *)&buff->map_info.len);
468 pr_err("ION mmap for CPP buffer failed\n");
473 INIT_LIST_HEAD(&buff->entry);
474 list_add_tail(&buff->entry, buff_head);
476 return buff->map_info.phy_addr;
/*
 * Unmap one buffer from the CPP SMMU context (stage-2 path for secure
 * streams) and unlink it from its map list.  Freeing of @buff itself,
 * if any, is on lines elided from this listing.
 */
481 static void msm_cpp_dequeue_buffer_info(struct cpp_device *cpp_dev,
482 struct msm_cpp_buff_queue_info_t *buff_queue,
483 struct msm_cpp_buffer_map_list_t *buff)
487 trace_printk("fd %d index %d native_buf %d ssid %d %d\n",
488 buff->map_info.buf_fd, buff->map_info.buff_info.index,
489 buff->map_info.buff_info.native_buff, buff_queue->session_id,
490 buff_queue->stream_id);
492 if (buff_queue->security_mode == SECURE_MODE)
493 ret = cam_smmu_put_stage2_phy_addr(cpp_dev->iommu_hdl,
494 buff->map_info.buf_fd);
496 ret = cam_smmu_put_phy_addr(cpp_dev->iommu_hdl,
497 buff->map_info.buf_fd);
499 pr_err("Error: cannot put the iommu handle back to ion fd\n");
501 list_del_init(&buff->entry);
/*
 * Resolve a buffer's IOVA for a (session, stream) pair.  Tries the
 * existing mapping first; for native buffers not yet mapped it falls
 * back to mapping via msm_cpp_queue_buffer_info.  Returns 0 when the
 * queue entry is missing.
 */
507 static unsigned long msm_cpp_fetch_buffer_info(struct cpp_device *cpp_dev,
508 struct msm_cpp_buffer_info_t *buffer_info, uint32_t session_id,
509 uint32_t stream_id, int32_t *fd)
511 unsigned long phy_addr = 0;
512 struct msm_cpp_buff_queue_info_t *buff_queue_info;
513 uint8_t native_buff = buffer_info->native_buff;
515 buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev, session_id,
517 if (buff_queue_info == NULL) {
518 pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
519 session_id, stream_id);
523 phy_addr = msm_cpp_get_phy_addr(cpp_dev, buff_queue_info,
524 buffer_info->index, native_buff, fd);
525 if ((phy_addr == 0) && (native_buff)) {
526 phy_addr = msm_cpp_queue_buffer_info(cpp_dev, buff_queue_info,
528 *fd = buffer_info->fd;
/* Unmap and unlink every buffer on both the native and vb2 lists. */
534 static int32_t msm_cpp_dequeue_buff_info_list(struct cpp_device *cpp_dev,
535 struct msm_cpp_buff_queue_info_t *buff_queue_info)
537 struct msm_cpp_buffer_map_list_t *buff, *save;
538 struct list_head *buff_head;
540 buff_head = &buff_queue_info->native_buff_head;
541 list_for_each_entry_safe(buff, save, buff_head, entry) {
542 msm_cpp_dequeue_buffer_info(cpp_dev, buff_queue_info, buff);
545 buff_head = &buff_queue_info->vb2_buff_head;
546 list_for_each_entry_safe(buff, save, buff_head, entry) {
547 msm_cpp_dequeue_buffer_info(cpp_dev, buff_queue_info, buff);
/*
 * Unmap a single buffer identified by @buff_index from the queue's
 * native or vb2 list (list choice depends on a flag on a line elided
 * from this listing).
 */
553 static int32_t msm_cpp_dequeue_buff(struct cpp_device *cpp_dev,
554 struct msm_cpp_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
557 struct msm_cpp_buffer_map_list_t *buff, *save;
558 struct list_head *buff_head;
561 buff_head = &buff_queue_info->native_buff_head;
563 buff_head = &buff_queue_info->vb2_buff_head;
565 list_for_each_entry_safe(buff, save, buff_head, entry) {
566 if (buff->map_info.buff_info.index == buff_index) {
567 msm_cpp_dequeue_buffer_info(cpp_dev, buff_queue_info,
/*
 * Claim the first free slot in cpp_dev->buff_queue for a new
 * (session, stream) pair, inheriting the device's current security
 * mode and initialising both buffer lists.  Logs an error when all
 * MSM_CPP_MAX_BUFF_QUEUE slots are in use.
 */
576 static int32_t msm_cpp_add_buff_queue_entry(struct cpp_device *cpp_dev,
577 uint16_t session_id, uint16_t stream_id)
580 struct msm_cpp_buff_queue_info_t *buff_queue_info;
582 for (i = 0; i < cpp_dev->num_buffq; i++) {
583 if (cpp_dev->buff_queue[i].used == 0) {
584 buff_queue_info = &cpp_dev->buff_queue[i];
585 buff_queue_info->used = 1;
586 buff_queue_info->session_id = session_id;
587 buff_queue_info->stream_id = stream_id;
588 buff_queue_info->security_mode =
589 cpp_dev->security_mode;
590 INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
591 INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
595 pr_err("buffer queue full. error for sessionid: %d streamid: %d\n",
596 session_id, stream_id);
/*
 * Release the buff_queue slot for (session_id, stream_id): clear its
 * identity, reset security mode to non-secure, and reinitialise both
 * lists.  Assumes the lists were already drained by the caller.
 */
600 static int32_t msm_cpp_free_buff_queue_entry(struct cpp_device *cpp_dev,
601 uint32_t session_id, uint32_t stream_id)
603 struct msm_cpp_buff_queue_info_t *buff_queue_info;
605 buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev, session_id,
607 if (buff_queue_info == NULL) {
608 pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
609 session_id, stream_id);
613 buff_queue_info->used = 0;
614 buff_queue_info->session_id = 0;
615 buff_queue_info->stream_id = 0;
616 buff_queue_info->security_mode = NON_SECURE_MODE;
617 INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
618 INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
/*
 * Allocate the buff_queue array (num_buffq entries, zeroed).  Refuses
 * to overwrite an existing queue; the leak-avoiding kfree on that path,
 * if present, is on lines elided from this listing.
 */
622 static int32_t msm_cpp_create_buff_queue(struct cpp_device *cpp_dev,
625 struct msm_cpp_buff_queue_info_t *buff_queue;
627 buff_queue = kzalloc(
628 sizeof(struct msm_cpp_buff_queue_info_t) * num_buffq,
631 pr_err("Buff queue allocation failure\n");
635 if (cpp_dev->buff_queue) {
636 pr_err("Buff queue not empty\n");
640 cpp_dev->buff_queue = buff_queue;
641 cpp_dev->num_buffq = num_buffq;
/*
 * Tear down the whole buff_queue array: any slot still in use is
 * drained and freed (with a warning), then the array itself is
 * released with kzfree and the bookkeeping reset.
 */
646 static void msm_cpp_delete_buff_queue(struct cpp_device *cpp_dev)
650 for (i = 0; i < cpp_dev->num_buffq; i++) {
651 if (cpp_dev->buff_queue[i].used == 1) {
652 pr_warn("Queue not free sessionid: %d, streamid: %d\n",
653 cpp_dev->buff_queue[i].session_id,
654 cpp_dev->buff_queue[i].stream_id);
655 msm_cpp_dequeue_buff_info_list
656 (cpp_dev, &cpp_dev->buff_queue[i]);
657 msm_cpp_free_buff_queue_entry(cpp_dev,
658 cpp_dev->buff_queue[i].session_id,
659 cpp_dev->buff_queue[i].stream_id);
662 kzfree(cpp_dev->buff_queue);
663 cpp_dev->buff_queue = NULL;
664 cpp_dev->num_buffq = 0;
/*
 * Poll the microcontroller TX FIFO until @val is read or
 * MSM_CPP_POLL_RETRIES is exhausted, sleeping 200-250us per attempt.
 * 0xDEADBEEF appears to be treated as a no-data sentinel from
 * msm_cpp_read -- confirm against msm_cpp_read's elided failure path.
 */
668 static int32_t msm_cpp_poll(void __iomem *cpp_base, u32 val)
670 uint32_t tmp, retry = 0;
674 tmp = msm_cpp_read(cpp_base);
675 if (tmp != 0xDEADBEEF)
676 CPP_LOW("poll: 0%x\n", tmp);
677 usleep_range(200, 250);
678 } while ((tmp != val) && (retry++ < MSM_CPP_POLL_RETRIES));
679 if (retry < MSM_CPP_POLL_RETRIES) {
680 CPP_LOW("Poll finished\n");
682 pr_err("Poll failed: expect: 0x%x\n", val);
/*
 * Wait until the microcontroller RX FIFO drains (status bit 0x2 clear),
 * sleeping between reads to give the microcontroller time to consume
 * the FIFO.  Bounded by MSM_CPP_POLL_RETRIES.
 */
688 static int32_t msm_cpp_poll_rx_empty(void __iomem *cpp_base)
690 uint32_t tmp, retry = 0;
693 tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_RX_STAT);
694 while (((tmp & 0x2) != 0x0) && (retry++ < MSM_CPP_POLL_RETRIES)) {
696 * Below usleep values are chosen based on experiments
697 * and this was the smallest number which works. This
698 * sleep is needed to leave enough time for Microcontroller
701 usleep_range(200, 300);
702 tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_RX_STAT);
705 if (retry < MSM_CPP_POLL_RETRIES) {
706 CPP_LOW("Poll rx empty\n");
708 pr_err("Poll rx empty failed\n");
/*
 * Debug dump of per-stripe buffer addresses from a frame's command
 * message: fetch/write pointers, reference read/write pointers, and
 * UBWC metadata pointers.  Offsets come from the payload_params laid
 * down at probe time; the tnr/ubwc/cds guards around the optional
 * dumps are on lines elided from this listing.
 */
714 static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
715 struct msm_cpp_frame_info_t *frame_info)
717 int32_t s_base, p_base;
718 uint32_t rd_off, wr0_off, wr1_off, wr2_off, wr3_off;
719 uint32_t wr0_mdata_off, wr1_mdata_off, wr2_mdata_off, wr3_mdata_off;
720 uint32_t rd_ref_off, wr_ref_off;
721 uint32_t s_size, p_size;
722 uint8_t tnr_enabled, ubwc_enabled, cds_en;
724 uint32_t *cpp_frame_msg;
726 cpp_frame_msg = frame_info->cpp_cmd_msg;
728 /* Update stripe/plane size and base offsets */
729 s_base = cpp_dev->payload_params.stripe_base;
730 s_size = cpp_dev->payload_params.stripe_size;
731 p_base = cpp_dev->payload_params.plane_base;
732 p_size = cpp_dev->payload_params.plane_size;
734 /* Fetch engine Offset */
735 rd_off = cpp_dev->payload_params.rd_pntr_off;
736 /* Write engine offsets */
737 wr0_off = cpp_dev->payload_params.wr_0_pntr_off;
738 wr1_off = wr0_off + 1;
739 wr2_off = wr1_off + 1;
740 wr3_off = wr2_off + 1;
741 /* Reference engine offsets */
742 rd_ref_off = cpp_dev->payload_params.rd_ref_pntr_off;
743 wr_ref_off = cpp_dev->payload_params.wr_ref_pntr_off;
744 /* Meta data offsets */
746 cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off;
747 wr1_mdata_off = (wr0_mdata_off + 1);
748 wr2_mdata_off = (wr1_mdata_off + 1);
749 wr3_mdata_off = (wr2_mdata_off + 1);
/* Feature bits: TNR=bit2, UBWC=bit5, CDS=bit6 of feature_mask. */
751 tnr_enabled = ((frame_info->feature_mask & TNR_MASK) >> 2);
752 ubwc_enabled = ((frame_info->feature_mask & UBWC_MASK) >> 5);
753 cds_en = ((frame_info->feature_mask & CDS_MASK) >> 6);
755 for (i = 0; i < frame_info->num_strips; i++) {
756 pr_err("stripe %d: in %x, out1 %x out2 %x, out3 %x, out4 %x\n",
757 i, cpp_frame_msg[s_base + rd_off + i * s_size],
758 cpp_frame_msg[s_base + wr0_off + i * s_size],
759 cpp_frame_msg[s_base + wr1_off + i * s_size],
760 cpp_frame_msg[s_base + wr2_off + i * s_size],
761 cpp_frame_msg[s_base + wr3_off + i * s_size]);
764 pr_err("stripe %d: read_ref %x, write_ref %x\n", i,
765 cpp_frame_msg[s_base + rd_ref_off + i * s_size],
766 cpp_frame_msg[s_base + wr_ref_off + i * s_size]
771 pr_err("stripe %d:, dsdn_off %x\n", i,
772 cpp_frame_msg[s_base + rd_ref_off + i * s_size]
777 pr_err("stripe %d: metadata %x, %x, %x, %x\n", i,
778 cpp_frame_msg[s_base + wr0_mdata_off +
780 cpp_frame_msg[s_base + wr1_mdata_off +
782 cpp_frame_msg[s_base + wr2_mdata_off +
784 cpp_frame_msg[s_base + wr3_mdata_off +
/*
 * First-stage IOMMU fault callback: mark the fault detected, mask the
 * hardware IRQs and issue an AXI halt so the main fault handler can run
 * recovery.  Ignores repeated faults once one is already flagged.
 * cpp_dev presumably comes from @token -- the assignment line is elided
 * from this listing.
 */
793 static void msm_cpp_iommu_fault_reset_handler(
794 struct iommu_domain *domain, struct device *dev,
797 struct cpp_device *cpp_dev = NULL;
800 pr_err("Invalid token\n");
806 if (cpp_dev->fault_status != CPP_IOMMU_FAULT_NONE) {
807 pr_err("fault already detected %d\n", cpp_dev->fault_status);
811 cpp_dev->fault_status = CPP_IOMMU_FAULT_DETECTED;
813 /* mask IRQ status */
814 msm_camera_io_w(0xB, cpp_dev->cpp_hw_base + 0xC);
816 pr_err("Issue CPP HALT %d\n", cpp_dev->fault_status);
818 /* MMSS_A_CPP_AXI_CMD = 0x16C, reset 0x1*/
819 msm_camera_io_w(0x1, cpp_dev->cpp_hw_base + 0x16C);
/*
 * Second-stage IOMMU fault recovery.  With the AXI already halted by
 * the reset handler: disable the IRQ, stop the watchdog and tasklet,
 * poll for HALT acknowledgement, issue a firmware reset (0x3FFFF to
 * RST_CMD_0), dump every in-flight frame's fds/addresses/command for
 * diagnosis, then flush the processing queue, release its buffers and
 * mark the fault recovered.
 */
823 static void msm_cpp_iommu_fault_handler(struct iommu_domain *domain,
824 struct device *dev, unsigned long iova, int flags, void *token)
826 struct cpp_device *cpp_dev = NULL;
827 struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
828 int32_t i = 0, queue_len = 0;
829 struct msm_device_queue *queue = NULL;
830 int32_t ifd, ofd, dfd, t0fd, t1fd;
837 if (cpp_dev->fault_status != CPP_IOMMU_FAULT_DETECTED) {
838 pr_err("fault recovery already done %d\n",
839 cpp_dev->fault_status);
843 disable_irq(cpp_dev->irq->start);
844 if (atomic_read(&cpp_timer.used)) {
845 atomic_set(&cpp_timer.used, 0);
846 del_timer_sync(&cpp_timer.cpp_timer);
848 tasklet_kill(&cpp_dev->cpp_tasklet);
850 pr_err("in recovery, HALT status = 0x%x\n",
851 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10));
/* Bounded wait for the HALT to take effect (exit test lines elided). */
853 while (counter < MSM_CPP_POLL_RETRIES) {
854 result = msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10);
857 usleep_range(100, 200);
860 /* MMSS_A_CPP_IRQ_STATUS_0 = 0x10 */
861 pr_err("counter %d HALT status later = 0x%x\n",
863 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10));
865 /* MMSS_A_CPP_RST_CMD_0 = 0x8 firmware reset = 0x3FFFF */
866 msm_camera_io_w(0x3FFFF, cpp_dev->cpp_hw_base + 0x8);
869 while (counter < MSM_CPP_POLL_RETRIES) {
870 result = msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10);
873 usleep_range(100, 200);
877 /* MMSS_A_CPP_IRQ_STATUS_0 = 0x10 */
878 pr_err("counter %d after reset IRQ_STATUS_0 = 0x%x\n",
880 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10));
882 /* MMSS_A_CPP_AXI_CMD = 0x16C, reset 0x1*/
883 msm_camera_io_w(0x0, cpp_dev->cpp_hw_base + 0x16C);
885 queue = &cpp_timer.data.cpp_dev->processing_q;
886 queue_len = queue->len;
888 pr_err("%s:%d: Invalid queuelen\n", __func__, __LINE__);
891 for (i = 0; i < queue_len; i++) {
892 if (cpp_timer.data.processed_frame[i]) {
894 cpp_timer.data.processed_frame[i];
895 ifd = processed_frame[i]->input_buffer_info.fd;
896 ofd = processed_frame[i]->
897 output_buffer_info[0].fd;
898 dfd = processed_frame[i]->
899 duplicate_buffer_info.fd;
900 t0fd = processed_frame[i]->
901 tnr_scratch_buffer_info[0].fd;
902 t1fd = processed_frame[i]->
903 tnr_scratch_buffer_info[1].fd;
904 pr_err("Fault on identity=0x%x, frame_id=%03d\n",
905 processed_frame[i]->identity,
906 processed_frame[i]->frame_id);
907 pr_err("ifd %d ofd %d dfd %d t0fd %d t1fd %d\n",
908 ifd, ofd, dfd, t0fd, t1fd);
909 msm_cpp_dump_addr(cpp_dev, processed_frame[i]);
910 msm_cpp_dump_frame_cmd(processed_frame[i]);
913 msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
914 cpp_dev->fault_status = CPP_IOMMU_FAULT_RECOVERED;
915 pr_err("fault recovery successful\n");
/*
 * Acquire the SMMU handle for the CPP ("cpp_0" on HW 5.0.0/5.1.0,
 * "cpp" otherwise), store it on the device, and register the two-stage
 * IOMMU page-fault handlers.
 */
920 static int cpp_init_mem(struct cpp_device *cpp_dev)
925 if (cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_5_0_0 ||
926 cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_5_1_0)
927 rc = cam_smmu_get_handle("cpp_0", &iommu_hdl);
929 rc = cam_smmu_get_handle("cpp", &iommu_hdl);
932 pr_err("smmu get handle failed\n");
936 cpp_dev->iommu_hdl = iommu_hdl;
937 cam_smmu_reg_client_page_fault_handler(
939 msm_cpp_iommu_fault_handler,
940 msm_cpp_iommu_fault_reset_handler,
/*
 * Hard-IRQ handler.  On a TX-FIFO interrupt (status bit 0x8) it drains
 * the FIFO into a tasklet queue command and schedules the tasklet; the
 * heavy message parsing happens later in msm_cpp_do_tasklet.  On error
 * bits (0x7C0) it dumps the microcontroller debug registers.  Always
 * acks by writing the status back to IRQGEN_CLR.
 */
946 static irqreturn_t msm_cpp_irq(int irq_num, void *data)
952 uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
953 struct cpp_device *cpp_dev = data;
954 struct msm_cpp_tasklet_queue_cmd *queue_cmd;
956 irq_status = msm_camera_io_r(cpp_dev->base + MSM_CPP_MICRO_IRQGEN_STAT);
958 if (irq_status & 0x8) {
959 tx_level = msm_camera_io_r(cpp_dev->base +
960 MSM_CPP_MICRO_FIFO_TX_STAT) >> 2;
961 for (i = 0; i < tx_level; i++) {
962 tx_fifo[i] = msm_camera_io_r(cpp_dev->base +
963 MSM_CPP_MICRO_FIFO_TX_DATA);
965 spin_lock_irqsave(&cpp_dev->tasklet_lock, flags);
966 queue_cmd = &cpp_dev->tasklet_queue_cmd[cpp_dev->taskletq_idx];
/* Ring overflow: recycle the oldest slot instead of growing irq_cnt. */
967 if (queue_cmd->cmd_used) {
968 pr_err("%s:%d] cpp tasklet queue overflow tx %d rc %x",
969 __func__, __LINE__, tx_level, irq_status);
970 list_del(&queue_cmd->list);
972 atomic_add(1, &cpp_dev->irq_cnt);
974 queue_cmd->irq_status = irq_status;
975 queue_cmd->tx_level = tx_level;
976 memset(&queue_cmd->tx_fifo[0], 0, sizeof(queue_cmd->tx_fifo));
977 for (i = 0; i < tx_level; i++)
978 queue_cmd->tx_fifo[i] = tx_fifo[i];
980 queue_cmd->cmd_used = 1;
981 cpp_dev->taskletq_idx =
982 (cpp_dev->taskletq_idx + 1) % MSM_CPP_TASKLETQ_SIZE;
983 list_add_tail(&queue_cmd->list, &cpp_dev->tasklet_q);
984 spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
986 tasklet_schedule(&cpp_dev->cpp_tasklet);
987 } else if (irq_status & 0x7C0) {
988 pr_debug("irq_status: 0x%x\n", irq_status);
989 pr_debug("DEBUG_SP: 0x%x\n",
990 msm_camera_io_r(cpp_dev->base + 0x40));
991 pr_debug("DEBUG_T: 0x%x\n",
992 msm_camera_io_r(cpp_dev->base + 0x44));
993 pr_debug("DEBUG_N: 0x%x\n",
994 msm_camera_io_r(cpp_dev->base + 0x48));
995 pr_debug("DEBUG_R: 0x%x\n",
996 msm_camera_io_r(cpp_dev->base + 0x4C));
997 pr_debug("DEBUG_OPPC: 0x%x\n",
998 msm_camera_io_r(cpp_dev->base + 0x50));
999 pr_debug("DEBUG_MO: 0x%x\n",
1000 msm_camera_io_r(cpp_dev->base + 0x54));
1001 pr_debug("DEBUG_TIMER0: 0x%x\n",
1002 msm_camera_io_r(cpp_dev->base + 0x60));
1003 pr_debug("DEBUG_TIMER1: 0x%x\n",
1004 msm_camera_io_r(cpp_dev->base + 0x64));
1005 pr_debug("DEBUG_GPI: 0x%x\n",
1006 msm_camera_io_r(cpp_dev->base + 0x70));
1007 pr_debug("DEBUG_GPO: 0x%x\n",
1008 msm_camera_io_r(cpp_dev->base + 0x74));
1009 pr_debug("DEBUG_T0: 0x%x\n",
1010 msm_camera_io_r(cpp_dev->base + 0x80));
1011 pr_debug("DEBUG_R0: 0x%x\n",
1012 msm_camera_io_r(cpp_dev->base + 0x84));
1013 pr_debug("DEBUG_T1: 0x%x\n",
1014 msm_camera_io_r(cpp_dev->base + 0x88));
1015 pr_debug("DEBUG_R1: 0x%x\n",
1016 msm_camera_io_r(cpp_dev->base + 0x8C));
/* Ack all handled interrupt bits. */
1018 msm_camera_io_w(irq_status, cpp_dev->base + MSM_CPP_MICRO_IRQGEN_CLR);
/*
 * Tasklet bottom half: drain queued FIFO snapshots (copied out of the
 * ring under tasklet_lock) and parse them for MSM_CPP_MSG_ID_CMD
 * messages.  Both FRAME_ACK and FRAME_NACK update the watchdog queue
 * and notify frame-done; NACK differs only in the error log.
 */
1022 void msm_cpp_do_tasklet(unsigned long data)
1024 unsigned long flags;
1025 uint32_t irq_status;
1027 uint32_t msg_id, cmd_len;
1029 uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
1030 struct cpp_device *cpp_dev = (struct cpp_device *) data;
1031 struct msm_cpp_tasklet_queue_cmd *queue_cmd;
1033 while (atomic_read(&cpp_dev->irq_cnt)) {
1034 spin_lock_irqsave(&cpp_dev->tasklet_lock, flags);
1035 queue_cmd = list_first_entry(&cpp_dev->tasklet_q,
1036 struct msm_cpp_tasklet_queue_cmd, list);
/* Defensive path (guard condition elided): reset count and bail. */
1038 atomic_set(&cpp_dev->irq_cnt, 0);
1039 spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
1042 atomic_sub(1, &cpp_dev->irq_cnt);
1043 list_del(&queue_cmd->list);
1044 queue_cmd->cmd_used = 0;
1045 irq_status = queue_cmd->irq_status;
1046 tx_level = queue_cmd->tx_level;
1047 for (i = 0; i < tx_level; i++)
1048 tx_fifo[i] = queue_cmd->tx_fifo[i];
1050 spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
1052 for (i = 0; i < tx_level; i++) {
1053 if (tx_fifo[i] == MSM_CPP_MSG_ID_CMD) {
1054 cmd_len = tx_fifo[i+1];
1055 msg_id = tx_fifo[i+2];
1056 if (msg_id == MSM_CPP_MSG_ID_FRAME_ACK) {
1057 CPP_DBG("Frame done!!\n");
1058 /* delete CPP timer */
1059 CPP_DBG("delete timer.\n");
1060 msm_cpp_timer_queue_update(cpp_dev);
1061 msm_cpp_notify_frame_done(cpp_dev, 0);
1062 } else if (msg_id ==
1063 MSM_CPP_MSG_ID_FRAME_NACK) {
1064 pr_err("NACK error from hw!!\n");
1065 CPP_DBG("delete timer.\n");
1066 msm_cpp_timer_queue_update(cpp_dev);
1067 msm_cpp_notify_frame_done(cpp_dev, 0);
/*
 * Full power-up and bring-up sequence: regulators -> (optional) micro
 * clock reset -> core clocks -> AHB vote -> IRQ + buf-mgr registration
 * (skipped in BOOT state) -> HW version probe (4.1.0 with VBIF 2.3.0
 * reads back as 4.0.0) -> freq table, buffer queues, firmware load
 * (with one VBIF-reset retry), IRQ unmask, VBIF programming.  The
 * error labels at the bottom unwind in reverse order.
 */
1075 static int cpp_init_hardware(struct cpp_device *cpp_dev)
1078 uint32_t vbif_version;
1079 cpp_dev->turbo_vote = 0;
1080 cpp_dev->fault_status = CPP_IOMMU_FAULT_NONE;
1082 rc = msm_camera_regulator_enable(cpp_dev->cpp_vdd,
1083 cpp_dev->num_reg, true);
1085 pr_err("%s: failed to enable regulators\n", __func__);
1086 goto reg_enable_failed;
1089 if (cpp_dev->micro_reset) {
1090 rc = msm_cpp_set_micro_clk(cpp_dev);
1092 pr_err("%s: reset micro clk failed\n", __func__);
1097 rc = msm_camera_clk_enable(&cpp_dev->pdev->dev, cpp_dev->clk_info,
1098 cpp_dev->cpp_clk, cpp_dev->num_clks, true);
1100 pr_err("%s: clk enable failed\n", __func__);
1104 rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
1107 pr_err("%s: failed to vote for AHB\n", __func__);
1111 if (cpp_dev->state != CPP_STATE_BOOT) {
1112 rc = msm_camera_register_irq(cpp_dev->pdev, cpp_dev->irq,
1113 msm_cpp_irq, IRQF_TRIGGER_RISING, "cpp", cpp_dev);
1115 pr_err("%s: irq request fail\n", __func__);
1118 rc = msm_cam_buf_mgr_register_ops(&cpp_dev->buf_mgr_ops);
1120 pr_err("buf mngr req ops failed\n");
1121 msm_camera_unregister_irq(cpp_dev->pdev,
1122 cpp_dev->irq, cpp_dev);
1127 cpp_dev->hw_info.cpp_hw_version =
1128 msm_camera_io_r(cpp_dev->cpp_hw_base);
1129 if (cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_4_1_0) {
1130 vbif_version = msm_camera_io_r(cpp_dev->vbif_base);
1131 if (vbif_version == VBIF_VERSION_2_3_0)
1132 cpp_dev->hw_info.cpp_hw_version = CPP_HW_VERSION_4_0_0;
1134 pr_info("CPP HW Version: 0x%x\n", cpp_dev->hw_info.cpp_hw_version);
1135 cpp_dev->hw_info.cpp_hw_caps =
1136 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4);
1138 rc = msm_update_freq_tbl(cpp_dev);
1140 goto pwr_collapse_reset;
1142 pr_debug("CPP HW Caps: 0x%x\n", cpp_dev->hw_info.cpp_hw_caps);
1143 msm_camera_io_w(0x1, cpp_dev->vbif_base + 0x4);
1144 cpp_dev->taskletq_idx = 0;
1145 atomic_set(&cpp_dev->irq_cnt, 0);
1146 rc = msm_cpp_create_buff_queue(cpp_dev, MSM_CPP_MAX_BUFF_QUEUE);
1148 pr_err("%s: create buff queue failed with err %d\n",
1150 goto pwr_collapse_reset;
1152 pr_err("stream_cnt:%d\n", cpp_dev->stream_cnt);
1153 cpp_dev->stream_cnt = 0;
1154 if (cpp_dev->fw_name_bin) {
/* Load firmware with IRQ masked; one retry via VBIF reset on failure. */
1155 msm_camera_enable_irq(cpp_dev->irq, false);
1156 rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
1158 pr_err("%s: load firmware failure %d-retry\n",
1160 rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
1162 msm_camera_enable_irq(cpp_dev->irq, true);
1163 goto pwr_collapse_reset;
1166 msm_camera_enable_irq(cpp_dev->irq, true);
1167 msm_camera_io_w_mb(0x7C8, cpp_dev->base +
1168 MSM_CPP_MICRO_IRQGEN_MASK);
1169 msm_camera_io_w_mb(0xFFFF, cpp_dev->base +
1170 MSM_CPP_MICRO_IRQGEN_CLR);
1173 msm_cpp_set_vbif_reg_values(cpp_dev);
/* --- error unwind labels (label names elided in this listing) --- */
1177 msm_cpp_update_gdscr_status(cpp_dev, false);
1178 msm_camera_unregister_irq(cpp_dev->pdev, cpp_dev->irq, cpp_dev);
1180 if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
1181 CAM_AHB_SUSPEND_VOTE) < 0)
1182 pr_err("%s: failed to remove vote for AHB\n", __func__);
1184 msm_camera_clk_enable(&cpp_dev->pdev->dev, cpp_dev->clk_info,
1185 cpp_dev->cpp_clk, cpp_dev->num_clks, false);
1187 msm_camera_regulator_enable(cpp_dev->cpp_vdd,
1188 cpp_dev->num_reg, false);
/*
 * Inverse of cpp_init_hardware: unregister the IRQ and kill the
 * tasklet (outside BOOT state), tear down buffer queues, drop the
 * GDSCR/AHB votes, disable clocks and regulators, and zero any
 * remaining bandwidth vote if streams were still active.
 */
1193 static void cpp_release_hardware(struct cpp_device *cpp_dev)
1197 if (cpp_dev->state != CPP_STATE_BOOT) {
1198 msm_camera_unregister_irq(cpp_dev->pdev, cpp_dev->irq, cpp_dev);
1199 tasklet_kill(&cpp_dev->cpp_tasklet);
1200 atomic_set(&cpp_dev->irq_cnt, 0);
1202 msm_cpp_delete_buff_queue(cpp_dev);
1203 msm_cpp_update_gdscr_status(cpp_dev, false);
1204 if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
1205 CAM_AHB_SUSPEND_VOTE) < 0)
1206 pr_err("%s: failed to remove vote for AHB\n", __func__);
1207 msm_camera_clk_enable(&cpp_dev->pdev->dev, cpp_dev->clk_info,
1208 cpp_dev->cpp_clk, cpp_dev->num_clks, false);
1209 msm_camera_regulator_enable(cpp_dev->cpp_vdd, cpp_dev->num_reg, false);
1210 if (cpp_dev->stream_cnt > 0) {
1211 pr_warn("stream count active\n");
1212 rc = msm_cpp_update_bandwidth_setting(cpp_dev, 0, 0);
1214 cpp_dev->stream_cnt = 0;
1215 pr_info("cpp hw release done\n");
/*
 * cpp_load_fw - load the CPP microcontroller firmware and start it.
 *
 * Validates the firmware name/blob, takes a nominal AHB vote for the
 * duration of the load, enables the micro clock and boot-start bit, then
 * handshakes with the microcontroller: each command/payload write to the
 * FIFO is preceded by an rx-empty poll, and each phase is confirmed by
 * polling for the expected message ID (CMD, OK, JUMP_ACK, TRAILER).
 * Returns 0 on success, negative error code otherwise.
 * NOTE(review): error-handling branches between the visible statements are
 * elided in this extract (embedded line-number gaps).
 */
1218 static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
1221 uint32_t *ptr_bin = NULL;
1222 int32_t rc = 0, ret = 0;
1225 pr_err("%s:%d] invalid fw name", __func__, __LINE__);
1229 pr_debug("%s:%d] FW file: %s\n", __func__, __LINE__, fw_name_bin);
1230 if (cpp_dev->fw == NULL) {
1231 pr_err("%s:%d] fw NULL", __func__, __LINE__);
1236 ptr_bin = (uint32_t *)cpp_dev->fw->data;
1238 pr_err("%s:%d] Fw bin NULL", __func__, __LINE__);
/* AHB must be at nominal while streaming the firmware image in. */
1243 rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
1244 CAM_AHB_NOMINAL_VOTE);
1246 pr_err("%s:%d: failed to vote for AHB\n", __func__, __LINE__);
1250 msm_camera_io_w(0x1, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
1251 msm_camera_io_w(0x1, cpp_dev->base +
1252 MSM_CPP_MICRO_BOOT_START);
1254 rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
1256 pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
1257 MSM_CPP_MSG_ID_CMD, rc);
1261 msm_camera_io_w(0xFFFFFFFF, cpp_dev->base +
1262 MSM_CPP_MICRO_IRQGEN_CLR);
1264 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
1266 pr_err("%s:%d] poll rx empty failed %d",
1267 __func__, __LINE__, rc);
1270 /*Start firmware loading*/
1271 msm_cpp_write(MSM_CPP_CMD_FW_LOAD, cpp_dev->base);
1272 msm_cpp_write(cpp_dev->fw->size, cpp_dev->base);
1273 msm_cpp_write(MSM_CPP_START_ADDRESS, cpp_dev->base);
1274 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
1276 pr_err("%s:%d] poll rx empty failed %d",
1277 __func__, __LINE__, rc);
/* Stream the image word-by-word, re-checking FIFO space every
 * MSM_CPP_RX_FIFO_LEVEL words to avoid overflowing the rx FIFO. */
1280 for (i = 0; i < cpp_dev->fw->size/4; i++) {
1281 msm_cpp_write(*ptr_bin, cpp_dev->base);
1282 if (i % MSM_CPP_RX_FIFO_LEVEL == 0) {
1283 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
1285 pr_err("%s:%d] poll rx empty failed %d",
1286 __func__, __LINE__, rc);
1292 msm_camera_io_w_mb(0x00, cpp_dev->cpp_hw_base + 0xC);
1293 rc = msm_cpp_update_gdscr_status(cpp_dev, true);
1295 pr_err("update cpp gdscr status failed\n");
1296 rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_OK);
1298 pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
1299 MSM_CPP_MSG_ID_OK, rc);
1303 rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
1305 pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
1306 MSM_CPP_MSG_ID_CMD, rc);
1310 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
1312 pr_err("%s:%d] poll rx empty failed %d",
1313 __func__, __LINE__, rc);
1316 /*Trigger MC to jump to start address*/
1317 msm_cpp_write(MSM_CPP_CMD_EXEC_JUMP, cpp_dev->base);
1318 msm_cpp_write(MSM_CPP_JUMP_ADDRESS, cpp_dev->base);
1320 rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
1322 pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
1323 MSM_CPP_MSG_ID_CMD, rc);
1327 rc = msm_cpp_poll(cpp_dev->base, 0x1);
1329 pr_err("%s:%d] poll command 0x1 failed %d", __func__, __LINE__,
1334 rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_JUMP_ACK);
1336 pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
1337 MSM_CPP_MSG_ID_JUMP_ACK, rc);
1341 rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
/* NOTE(review): copy-paste bug - this poll waits for
 * MSM_CPP_MSG_ID_TRAILER but the error message below prints
 * MSM_CPP_MSG_ID_JUMP_ACK, which is misleading in logs. */
1343 pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
1344 MSM_CPP_MSG_ID_JUMP_ACK, rc);
/* Drop the nominal AHB vote taken above; 'ret' is used so a failure
 * here does not clobber the load result in 'rc'. */
1348 ret = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
1351 pr_err("%s:%d: failed to vote for AHB\n", __func__, __LINE__);
/*
 * msm_cpp_reset_vbif_clients - notify every registered VBIF client of a hang.
 *
 * Iterates the error-handler table and invokes each non-NULL handler with
 * CPP_VBIF_ERROR_HANG so all clients sharing the VBIF can recover.
 */
1358 int32_t msm_cpp_reset_vbif_clients(struct cpp_device *cpp_dev)
1362 pr_warn("%s: handle vbif hang...\n", __func__);
1363 for (i = 0; i < VBIF_CLIENT_MAX; i++) {
1364 if (cpp_dev->vbif_data->err_handler[i] == NULL)
1367 cpp_dev->vbif_data->err_handler[i](
1368 cpp_dev->vbif_data->dev[i], CPP_VBIF_ERROR_HANG);
/*
 * msm_cpp_reset_vbif_and_load_fw - recover from a VBIF hang.
 *
 * Resets all VBIF clients and then reloads the CPP firmware; used as the
 * retry path when a plain cpp_load_fw() attempt fails.
 */
1373 int32_t msm_cpp_reset_vbif_and_load_fw(struct cpp_device *cpp_dev)
1377 msm_cpp_reset_vbif_clients(cpp_dev);
1379 rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
1381 pr_err("Reset and load fw failed %d\n", rc);
/*
 * cpp_vbif_error_handler - VBIF error callback for the CPP client.
 * @dev:        opaque pointer, actually the struct cpp_device.
 * @vbif_error: one of the CPP_VBIF_ERROR_* codes.
 *
 * Logs the IRQ status around a firmware/bus-bridge reset of FD + CPP,
 * issued by writing the reset command word to MMSS_A_CPP_RST_CMD_0.
 */
1386 int cpp_vbif_error_handler(void *dev, uint32_t vbif_error)
1388 struct cpp_device *cpp_dev = NULL;
1390 if (dev == NULL || vbif_error >= CPP_VBIF_ERROR_MAX) {
/* NOTE(review): uses %p while the rest of this file uses %pK for
 * pointers; %p can leak a raw kernel address to the log. */
1391 pr_err("failed: dev %p, vbif error %d\n", dev, vbif_error);
1395 cpp_dev = (struct cpp_device *) dev;
1397 /* MMSS_A_CPP_IRQ_STATUS_0 = 0x10 */
1398 pr_err("%s: before reset halt... read MMSS_A_CPP_IRQ_STATUS_0 = 0x%x",
1399 __func__, msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10));
1401 pr_err("%s: start reset bus bridge on FD + CPP!\n", __func__);
1402 /* MMSS_A_CPP_RST_CMD_0 = 0x8, firmware reset = 0x3DF77 */
1403 msm_camera_io_w(0x3DF77, cpp_dev->cpp_hw_base + 0x8);
1405 /* MMSS_A_CPP_IRQ_STATUS_0 = 0x10 */
1406 pr_err("%s: after reset halt... read MMSS_A_CPP_IRQ_STATUS_0 = 0x%x",
1407 __func__, msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10));
/*
 * cpp_open_node - v4l2 subdev .open handler.
 *
 * Claims a free slot in cpp_subscribe_list (bounded by
 * MAX_ACTIVE_CPP_INSTANCE), registers the VBIF error handler, and on the
 * FIRST open only: initializes the hardware, the memory interface, and the
 * ion client, moving the device to CPP_STATE_IDLE. All failure paths roll
 * back the open count / slot before unlocking. Serialized by
 * cpp_dev->mutex.
 * NOTE(review): some error-branch lines are elided from this extract.
 */
1412 static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1416 struct cpp_device *cpp_dev = NULL;
1421 pr_err("Wrong input parameters sd %pK fh %pK!",
1425 cpp_dev = v4l2_get_subdevdata(sd);
1427 pr_err("failed: cpp_dev %pK\n", cpp_dev);
1430 mutex_lock(&cpp_dev->mutex);
1431 if (cpp_dev->cpp_open_cnt == MAX_ACTIVE_CPP_INSTANCE) {
1432 pr_err("No free CPP instance\n");
1433 mutex_unlock(&cpp_dev->mutex);
/* Find a free subscriber slot and bind this file handle to it. */
1437 for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
1438 if (cpp_dev->cpp_subscribe_list[i].active == 0) {
1439 cpp_dev->cpp_subscribe_list[i].active = 1;
1440 cpp_dev->cpp_subscribe_list[i].vfh = &fh->vfh;
1444 if (i == MAX_ACTIVE_CPP_INSTANCE) {
1445 pr_err("No free instance\n");
1446 mutex_unlock(&cpp_dev->mutex);
1450 CPP_DBG("open %d %pK\n", i, &fh->vfh);
1451 cpp_dev->cpp_open_cnt++;
1453 msm_cpp_vbif_register_error_handler(cpp_dev,
1454 VBIF_CLIENT_CPP, cpp_vbif_error_handler);
/* First opener brings the hardware up; later opens share it. */
1456 if (cpp_dev->cpp_open_cnt == 1) {
1457 rc = cpp_init_hardware(cpp_dev);
1459 cpp_dev->cpp_open_cnt--;
1460 cpp_dev->cpp_subscribe_list[i].active = 0;
1461 cpp_dev->cpp_subscribe_list[i].vfh = NULL;
1462 mutex_unlock(&cpp_dev->mutex);
1466 rc = cpp_init_mem(cpp_dev);
1468 pr_err("Error: init memory fail\n");
1469 cpp_dev->cpp_open_cnt--;
1470 cpp_dev->cpp_subscribe_list[i].active = 0;
1471 cpp_dev->cpp_subscribe_list[i].vfh = NULL;
1472 mutex_unlock(&cpp_dev->mutex);
1476 cpp_dev->state = CPP_STATE_IDLE;
1478 CPP_DBG("Invoking msm_ion_client_create()\n");
1479 cpp_dev->ion_client = msm_ion_client_create("cpp");
1480 if (cpp_dev->ion_client == NULL) {
1481 pr_err("msm_ion_client_create() failed\n");
1482 mutex_unlock(&cpp_dev->mutex);
1487 mutex_unlock(&cpp_dev->mutex);
/*
 * cpp_close_node - v4l2 subdev .close handler.
 *
 * Releases this handle's subscriber slot, drops any outstanding cx_ipeak
 * turbo vote, and on the LAST close: dumps hardware debug registers, gates
 * the micro clock, clears the watchdog timer, releases the hardware,
 * detaches and destroys the SMMU mapping (secure or non-secure path),
 * empties the processing/event queues, destroys the ion client, and
 * unregisters the VBIF error handler. Serialized by cpp_dev->mutex.
 * NOTE(review): several error-branch lines are elided from this extract.
 */
1491 static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1495 struct cpp_device *cpp_dev = NULL;
1496 struct msm_device_queue *processing_q = NULL;
1497 struct msm_device_queue *eventData_q = NULL;
1500 pr_err("Wrong input sd parameter");
1503 cpp_dev = v4l2_get_subdevdata(sd);
1506 pr_err("failed: cpp_dev %pK\n", cpp_dev);
1510 mutex_lock(&cpp_dev->mutex);
1512 processing_q = &cpp_dev->processing_q;
1513 eventData_q = &cpp_dev->eventData_q;
1515 if (cpp_dev->cpp_open_cnt == 0) {
1516 mutex_unlock(&cpp_dev->mutex);
/* Find and free the slot claimed in cpp_open_node(). */
1520 for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
1521 if (cpp_dev->cpp_subscribe_list[i].active == 1) {
1522 cpp_dev->cpp_subscribe_list[i].active = 0;
1523 cpp_dev->cpp_subscribe_list[i].vfh = NULL;
1527 if (i == MAX_ACTIVE_CPP_INSTANCE) {
1528 pr_err("Invalid close\n");
1529 mutex_unlock(&cpp_dev->mutex);
/* Release any turbo-frequency vote still held by this session. */
1533 if (cpp_dev->turbo_vote == 1) {
1534 rc = cx_ipeak_update(cpp_dev->cpp_cx_ipeak, false);
1536 pr_err("cx_ipeak_update failed");
1538 cpp_dev->turbo_vote = 0;
1541 cpp_dev->cpp_open_cnt--;
1542 if (cpp_dev->cpp_open_cnt == 0) {
/* Dump micro-engine debug registers to aid post-mortem before
 * the block is powered down. */
1543 pr_debug("irq_status: 0x%x\n",
1544 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4));
1545 pr_debug("DEBUG_SP: 0x%x\n",
1546 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x40));
1547 pr_debug("DEBUG_T: 0x%x\n",
1548 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x44));
1549 pr_debug("DEBUG_N: 0x%x\n",
1550 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x48));
1551 pr_debug("DEBUG_R: 0x%x\n",
1552 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4C));
1553 pr_debug("DEBUG_OPPC: 0x%x\n",
1554 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x50));
1555 pr_debug("DEBUG_MO: 0x%x\n",
1556 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x54));
1557 pr_debug("DEBUG_TIMER0: 0x%x\n",
1558 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x60));
1559 pr_debug("DEBUG_TIMER1: 0x%x\n",
1560 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x64));
1561 pr_debug("DEBUG_GPI: 0x%x\n",
1562 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x70));
1563 pr_debug("DEBUG_GPO: 0x%x\n",
1564 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x74));
1565 pr_debug("DEBUG_T0: 0x%x\n",
1566 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x80));
1567 pr_debug("DEBUG_R0: 0x%x\n",
1568 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x84));
1569 pr_debug("DEBUG_T1: 0x%x\n",
1570 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x88));
1571 pr_debug("DEBUG_R1: 0x%x\n",
1572 msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
1573 msm_camera_io_w(0x0, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
1574 msm_cpp_clear_timer(cpp_dev);
1575 cpp_release_hardware(cpp_dev);
/* Detach from the IOMMU; the secure path uses a different SMMU op. */
1576 if (cpp_dev->iommu_state == CPP_IOMMU_STATE_ATTACHED) {
1577 if (cpp_dev->security_mode == SECURE_MODE)
1578 rc = cam_smmu_ops(cpp_dev->iommu_hdl,
1579 CAM_SMMU_DETACH_SEC_CPP);
1581 rc = cam_smmu_ops(cpp_dev->iommu_hdl,
1585 pr_err("Error: Detach fail in release\n");
1586 cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
1588 cam_smmu_destroy_handle(cpp_dev->iommu_hdl);
1589 msm_cpp_empty_list(processing_q, list_frame);
1590 msm_cpp_empty_list(eventData_q, list_eventdata);
1591 cpp_dev->state = CPP_STATE_OFF;
1593 if (cpp_dev->ion_client) {
1594 CPP_DBG("Invoking ion_client_destroy()\n");
1595 ion_client_destroy(cpp_dev->ion_client);
1596 cpp_dev->ion_client = NULL;
1600 /* unregister vbif error handler */
1601 msm_cpp_vbif_register_error_handler(cpp_dev,
1602 VBIF_CLIENT_CPP, NULL);
1603 mutex_unlock(&cpp_dev->mutex);
/* v4l2 subdev internal ops: per-file-handle open/close lifecycle hooks. */
1607 static const struct v4l2_subdev_internal_ops msm_cpp_internal_ops = {
1608 .open = cpp_open_node,
1609 .close = cpp_close_node,
/*
 * msm_cpp_buffer_ops - dispatch a buffer-manager operation.
 * @buff_mgr_ops: VIDIOC_MSM_BUF_MNGR_* opcode.
 * @ids:          identity word passed through to the private ops.
 * @arg:          opcode-specific payload (struct msm_buf_mngr_info for the
 *                GET/PUT/BUF_DONE cases).
 *
 * IOCTL_CMD is routed to the driver's private handler; GET/PUT/BUF_DONE go
 * straight to the generic buffer manager callback. Returns the callee's rc.
 */
1612 static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
1613 uint32_t buff_mgr_ops, uint32_t ids,
1618 switch (buff_mgr_ops) {
1619 case VIDIOC_MSM_BUF_MNGR_IOCTL_CMD: {
1620 rc = msm_cpp_buffer_private_ops(cpp_dev, buff_mgr_ops,
1624 case VIDIOC_MSM_BUF_MNGR_PUT_BUF:
1625 case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
1626 case VIDIOC_MSM_BUF_MNGR_GET_BUF:
1628 struct msm_buf_mngr_info *buff_mgr_info =
1629 (struct msm_buf_mngr_info *)arg;
1630 rc = cpp_dev->buf_mgr_ops.msm_cam_buf_mgr_ops(buff_mgr_ops,
1636 pr_debug("%s: line %d rc = %d\n", __func__, __LINE__, rc);
/*
 * msm_cpp_notify_frame_done - complete one processed frame.
 *
 * Pops the oldest frame from processing_q, timestamps it, queues its info
 * on eventData_q, and (unless this is a non-final partial payload) returns
 * the output buffer(s) to the buffer manager: PUT_BUF + BUF_DONE for the
 * primary output and, when duplication is enabled, for the duplicate
 * output as well. Finally posts a V4L2_EVENT_CPP_FRAME_DONE event to
 * userspace.
 * NOTE(review): error-check lines between the visible statements are
 * elided in this extract.
 */
1640 static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev,
1643 struct v4l2_event v4l2_evt;
1644 struct msm_queue_cmd *frame_qcmd = NULL;
1645 struct msm_queue_cmd *event_qcmd = NULL;
1646 struct msm_cpp_frame_info_t *processed_frame = NULL;
1647 struct msm_device_queue *queue = &cpp_dev->processing_q;
1648 struct msm_buf_mngr_info buff_mgr_info;
1651 frame_qcmd = msm_dequeue(queue, list_frame, POP_FRONT);
1653 processed_frame = frame_qcmd->command;
1654 do_gettimeofday(&(processed_frame->out_time));
/* GFP_ATOMIC: this path can run from the IRQ tasklet context. */
1656 event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
1658 pr_err("Insufficient memory\n");
1661 atomic_set(&event_qcmd->on_heap, 1);
1662 event_qcmd->command = processed_frame;
1663 CPP_DBG("fid %d\n", processed_frame->frame_id);
1664 msm_enqueue(&cpp_dev->eventData_q, &event_qcmd->list_eventdata);
/* Partial frame that is not the last payload: skip buffer return. */
1666 if ((processed_frame->partial_frame_indicator != 0) &&
1667 (processed_frame->last_payload == 0))
1668 goto NOTIFY_FRAME_DONE;
1670 if (!processed_frame->output_buffer_info[0].processed_divert &&
1671 !processed_frame->output_buffer_info[0].native_buff &&
1672 !processed_frame->we_disable) {
1674 int32_t iden = processed_frame->identity;
1676 SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(processed_frame,
1677 iden, processed_frame->duplicate_identity);
1679 memset(&buff_mgr_info, 0,
1680 sizeof(struct msm_buf_mngr_info));
/* Identity packs session (high 16 bits) and stream (low 16). */
1682 buff_mgr_info.session_id = ((iden >> 16) & 0xFFFF);
1683 buff_mgr_info.stream_id = (iden & 0xFFFF);
1684 buff_mgr_info.frame_id = processed_frame->frame_id;
1685 buff_mgr_info.timestamp = processed_frame->timestamp;
1686 if (processed_frame->batch_info.batch_mode ==
1688 (IS_BATCH_BUFFER_ON_PREVIEW(
1689 processed_frame))) {
1690 buff_mgr_info.index =
1691 processed_frame->batch_info.cont_idx;
1693 buff_mgr_info.index = processed_frame->
1694 output_buffer_info[0].index;
1697 rc = msm_cpp_buffer_ops(cpp_dev,
1698 VIDIOC_MSM_BUF_MNGR_PUT_BUF,
1699 0x0, &buff_mgr_info);
1701 pr_err("error putting buffer\n");
1705 rc = msm_cpp_buffer_ops(cpp_dev,
1706 VIDIOC_MSM_BUF_MNGR_BUF_DONE,
1707 0x0, &buff_mgr_info);
/* NOTE(review): message says "putting" but this branch reports a
 * BUF_DONE failure - copy-paste of the PUT_BUF error string. */
1709 pr_err("error putting buffer\n");
/* Same return sequence for the duplicate output, keyed by the
 * duplicate identity. */
1715 if (processed_frame->duplicate_output &&
1717 duplicate_buffer_info.processed_divert &&
1718 !processed_frame->we_disable) {
1719 int32_t iden = processed_frame->duplicate_identity;
1721 SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(processed_frame,
1722 iden, processed_frame->identity);
1724 memset(&buff_mgr_info, 0,
1725 sizeof(struct msm_buf_mngr_info));
1727 buff_mgr_info.session_id = ((iden >> 16) & 0xFFFF);
1728 buff_mgr_info.stream_id = (iden & 0xFFFF);
1729 buff_mgr_info.frame_id = processed_frame->frame_id;
1730 buff_mgr_info.timestamp = processed_frame->timestamp;
1731 buff_mgr_info.index =
1732 processed_frame->duplicate_buffer_info.index;
1734 rc = msm_cpp_buffer_ops(cpp_dev,
1735 VIDIOC_MSM_BUF_MNGR_PUT_BUF,
1736 0x0, &buff_mgr_info);
1738 pr_err("error putting buffer\n");
1742 rc = msm_cpp_buffer_ops(cpp_dev,
1743 VIDIOC_MSM_BUF_MNGR_BUF_DONE,
1744 0x0, &buff_mgr_info);
1746 pr_err("error putting buffer\n");
1752 v4l2_evt.id = processed_frame->inst_id;
1753 v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
1754 v4l2_event_queue(cpp_dev->msm_sd.sd.devnode, &v4l2_evt);
/*
 * msm_cpp_dump_frame_cmd - debug dump of a frame command message.
 *
 * Compiled in only when MSM_CPP_DUMP_FRM_CMD is set; prints the top-level
 * and plane-level words, then the stripe words for the frame's stripe
 * range, and finally the trailer. The #else arm is a no-op stub so callers
 * need no conditional compilation.
 */
1759 #if MSM_CPP_DUMP_FRM_CMD
1760 static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info)
1763 struct cpp_device *cpp_dev = cpp_timer.data.cpp_dev;
1765 CPP_DBG("-- start: cpp frame cmd for identity=0x%x, frame_id=%d --\n",
1766 frame_info->identity, frame_info->frame_id);
1768 CPP_DBG("msg[%03d] = 0x%08x\n", 0, 0x6);
1769 /* send top level and plane level */
1770 for (i = 0; i < cpp_dev->payload_params.stripe_base; i++)
1771 CPP_DBG("msg[%03d] = 0x%08x\n", i,
1772 frame_info->cpp_cmd_msg[i]);
/* i1 = offset of the first stripe to dump; i2 = word count for the
 * inclusive stripe range [first_stripe_index, last_stripe_index]. */
1774 i1 = cpp_dev->payload_params.stripe_base +
1775 cpp_dev->payload_params.stripe_size *
1776 frame_info->first_stripe_index;
1777 i2 = cpp_dev->payload_params.stripe_size *
1778 (frame_info->last_stripe_index -
1779 frame_info->first_stripe_index + 1);
1780 for (i = 0; i < i2; i++)
1781 CPP_DBG("msg[%03d] = 0x%08x\n", i+i1,
1782 frame_info->cpp_cmd_msg[i+i1]);
1784 CPP_DBG("msg[%03d] = 0x%08x\n", i+i1, MSM_CPP_MSG_ID_TRAILER);
1785 CPP_DBG("-- end: cpp frame cmd for identity=0x%x, frame_id=%d --\n",
1786 frame_info->identity, frame_info->frame_id);
/* Stub used when frame-command dumping is compiled out. */
1790 static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info)
/*
 * msm_cpp_flush_queue_and_release_buffer - drop all queued frames.
 *
 * Completes every pending frame via msm_cpp_notify_frame_done() (with the
 * flush flag set), disarms the watchdog timer's 'used' flag, and clears
 * the per-slot processed_frame bookkeeping.
 */
1796 static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
1801 msm_cpp_notify_frame_done(cpp_dev, 1);
1804 atomic_set(&cpp_timer.used, 0);
1805 for (i = 0; i < MAX_CPP_PROCESSING_FRAME; i++)
1806 cpp_timer.data.processed_frame[i] = NULL;
/*
 * msm_cpp_set_micro_irq_mask - program the micro IRQ mask and clear
 * pending bits, then re-enable the IRQ line.
 * @enable:   requested IRQ state; the disable path is not visible in this
 *            extract (lines elided) - presumably gates enable_irq(). TODO
 *            confirm against the full source.
 * @irq_mask: value written to MSM_CPP_MICRO_IRQGEN_MASK.
 */
1809 static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev,
1810 uint8_t enable, uint32_t irq_mask)
1812 msm_camera_io_w_mb(irq_mask, cpp_dev->base +
1813 MSM_CPP_MICRO_IRQGEN_MASK);
1814 msm_camera_io_w_mb(0xFFFF, cpp_dev->base +
1815 MSM_CPP_MICRO_IRQGEN_CLR);
1817 enable_irq(cpp_dev->irq->start);
/*
 * msm_cpp_do_timeout_work - watchdog-timeout recovery worker.
 *
 * Runs from the timer workqueue when a submitted frame did not complete
 * within CPP_CMD_TIMEOUT_MS. Under cpp_dev->mutex it:
 *   1. bails out if the IRQ was serviced in the meantime (timer 'used'
 *      flag cleared) or the device is not ACTIVE;
 *   2. disables the IRQ, kills the tasklet, and reloads the firmware
 *      (falling back to a VBIF reset + reload on failure);
 *   3. gives up and flushes the queue once max_timeout_trial_cnt is
 *      reached;
 *   4. otherwise re-arms the timer and re-writes every queued frame's
 *      command payload to the hardware FIFO, polling rx-empty every
 *      MSM_CPP_RX_FIFO_LEVEL words, then bumps timeout_trial_cnt.
 * NOTE(review): many error-branch and label lines are elided from this
 * extract (embedded line-number gaps).
 */
1820 static void msm_cpp_do_timeout_work(struct work_struct *work)
1822 uint32_t j = 0, i = 0, i1 = 0, i2 = 0;
1823 int32_t queue_len = 0, rc = 0, fifo_counter = 0;
1824 struct msm_device_queue *queue = NULL;
1825 struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
1826 struct cpp_device *cpp_dev = cpp_timer.data.cpp_dev;
1828 pr_warn("cpp_timer_callback called. (jiffies=%lu)\n",
1830 mutex_lock(&cpp_dev->mutex);
1832 if (!work || (cpp_timer.data.cpp_dev->state != CPP_STATE_ACTIVE)) {
1833 pr_err("Invalid work:%pK or state:%d\n", work,
1834 cpp_timer.data.cpp_dev->state);
1835 /* Do not flush queue here as it is not a fatal error */
1838 if (!atomic_read(&cpp_timer.used)) {
1839 pr_warn("Delayed trigger, IRQ serviced\n");
1840 /* Do not flush queue here as it is not a fatal error */
1844 msm_camera_enable_irq(cpp_timer.data.cpp_dev->irq, false);
1845 /* make sure all the pending queued entries are scheduled */
1846 tasklet_kill(&cpp_dev->cpp_tasklet);
1848 queue = &cpp_timer.data.cpp_dev->processing_q;
1849 queue_len = queue->len;
1851 pr_err("%s:%d: irq serviced after timeout.Ignore timeout\n",
1852 __func__, __LINE__);
1853 msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
/* Reload firmware; one retry through the VBIF-reset path. */
1857 pr_debug("Reloading firmware %d\n", queue_len);
1858 rc = cpp_load_fw(cpp_timer.data.cpp_dev,
1859 cpp_timer.data.cpp_dev->fw_name_bin);
1861 pr_warn("Firmware loading failed-retry\n");
1862 rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
1864 pr_err("Firmware loading failed\n");
1868 pr_debug("Firmware loading done\n");
1871 if (!atomic_read(&cpp_timer.used)) {
1872 pr_warn("Delayed trigger, IRQ serviced\n");
1873 /* Do not flush queue here as it is not a fatal error */
1874 msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
1875 cpp_dev->timeout_trial_cnt = 0;
1879 if (cpp_dev->timeout_trial_cnt >=
1880 cpp_dev->max_timeout_trial_cnt) {
1881 pr_warn("Max trial reached\n");
1882 msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
1883 msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
/* Snapshot the queued frame pointers before resubmission; any NULL
 * slot means bookkeeping is corrupt, so flush everything. */
1887 for (i = 0; i < queue_len; i++) {
1888 processed_frame[i] = cpp_timer.data.processed_frame[i];
1889 if (!processed_frame[i]) {
1890 pr_warn("process frame null , queue len %d", queue_len);
1891 msm_cpp_flush_queue_and_release_buffer(cpp_dev,
1893 msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
1898 atomic_set(&cpp_timer.used, 1);
1899 pr_warn("Starting timer to fire in %d ms. (jiffies=%lu)\n",
1900 CPP_CMD_TIMEOUT_MS, jiffies);
1901 mod_timer(&cpp_timer.cpp_timer,
1902 jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
1904 msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
/* Resubmit every frame: header word 0x6, then top/plane words, then
 * the stripe words, then the trailer - same layout as the normal
 * submission path in msm_cpp_send_frame_to_hardware(). */
1906 for (i = 0; i < queue_len; i++) {
1907 pr_warn("Rescheduling for identity=0x%x, frame_id=%03d\n",
1908 processed_frame[i]->identity,
1909 processed_frame[i]->frame_id);
1911 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
1913 pr_err("%s:%d: Reschedule payload failed %d\n",
1914 __func__, __LINE__, rc);
1917 msm_cpp_write(0x6, cpp_dev->base);
1919 /* send top level and plane level */
1920 for (j = 0; j < cpp_dev->payload_params.stripe_base; j++,
1922 if (fifo_counter % MSM_CPP_RX_FIFO_LEVEL == 0) {
1923 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
1925 pr_err("%s:%d] poll failed %d rc %d",
1926 __func__, __LINE__, j, rc);
1931 msm_cpp_write(processed_frame[i]->cpp_cmd_msg[j],
1935 pr_err("%s:%d: Rescheduling plane info failed %d\n",
1936 __func__, __LINE__, rc);
1940 i1 = cpp_dev->payload_params.stripe_base +
1941 cpp_dev->payload_params.stripe_size *
1942 processed_frame[i]->first_stripe_index;
1943 i2 = cpp_dev->payload_params.stripe_size *
1944 (processed_frame[i]->last_stripe_index -
1945 processed_frame[i]->first_stripe_index + 1);
1946 for (j = 0; j < i2; j++, fifo_counter++) {
1947 if (fifo_counter % MSM_CPP_RX_FIFO_LEVEL == 0) {
1948 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
1950 pr_err("%s:%d] poll failed %d rc %d",
1951 __func__, __LINE__, j, rc);
1956 msm_cpp_write(processed_frame[i]->cpp_cmd_msg[j+i1],
1960 pr_err("%s:%d] Rescheduling stripe info failed %d\n",
1961 __func__, __LINE__, rc);
1966 if (fifo_counter % MSM_CPP_RX_FIFO_LEVEL == 0) {
1967 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
1969 pr_err("%s:%d] Reschedule trailer failed %d\n",
1970 __func__, __LINE__, rc);
1975 msm_cpp_write(0xabcdefaa, cpp_dev->base);
1976 pr_debug("After frame:%d write\n", i+1);
1979 cpp_timer.data.cpp_dev->timeout_trial_cnt++;
1982 mutex_unlock(&cpp_dev->mutex);
1983 pr_debug("%s:%d] exit\n", __func__, __LINE__);
/* Fatal-error path: mark the device OFF, flush the queue, mask the
 * micro IRQ, and reset the trial counter. */
1986 cpp_dev->state = CPP_STATE_OFF;
1987 /* flush the queue */
1988 msm_cpp_flush_queue_and_release_buffer(cpp_dev,
1990 msm_cpp_set_micro_irq_mask(cpp_dev, 0, 0x0);
1991 cpp_dev->timeout_trial_cnt = 0;
1992 mutex_unlock(&cpp_dev->mutex);
1993 pr_debug("%s:%d] exit\n", __func__, __LINE__);
/*
 * cpp_timer_callback - watchdog timer expiry handler.
 *
 * Runs in timer (softirq) context, so it only queues the timeout work onto
 * the driver's workqueue; the real recovery happens in
 * msm_cpp_do_timeout_work().
 */
1997 void cpp_timer_callback(unsigned long data)
1999 struct msm_cpp_work_t *work =
2000 cpp_timer.data.cpp_dev->work;
2001 queue_work(cpp_timer.data.cpp_dev->timer_wq,
2002 (struct work_struct *)work);
/*
 * msm_cpp_send_frame_to_hardware - submit one frame command to the CPP.
 *
 * If processing_q has room: enqueues the frame, records it in the timer's
 * processed_frame slot (under processed_frame_lock), arms the watchdog
 * timer, then streams the command to the FIFO - header word 0x6, the
 * top/plane-level words, the stripe words for the frame's stripe range,
 * and the MSM_CPP_MSG_ID_TRAILER - polling rx-empty every
 * MSM_CPP_RX_FIFO_LEVEL words. On a full queue the frame is dropped with
 * an error. The error path dequeues the frame again and disarms the timer
 * when the queue becomes empty. Returns 0 on success, negative otherwise
 * (rc starts at -EAGAIN).
 * NOTE(review): several error-branch lines are elided from this extract.
 */
2005 static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
2006 struct msm_queue_cmd *frame_qcmd)
2008 unsigned long flags;
2010 int32_t rc = -EAGAIN;
2011 struct msm_cpp_frame_info_t *process_frame;
2012 struct msm_queue_cmd *qcmd = NULL;
2013 uint32_t queue_len = 0, fifo_counter = 0;
2015 if (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
2016 process_frame = frame_qcmd->command;
2017 msm_cpp_dump_frame_cmd(process_frame);
/* Track the frame for the watchdog's reschedule path. */
2018 spin_lock_irqsave(&cpp_timer.data.processed_frame_lock, flags);
2019 msm_enqueue(&cpp_dev->processing_q,
2020 &frame_qcmd->list_frame);
2021 cpp_timer.data.processed_frame[cpp_dev->processing_q.len - 1] =
2023 queue_len = cpp_dev->processing_q.len;
2024 spin_unlock_irqrestore(&cpp_timer.data.processed_frame_lock,
2026 atomic_set(&cpp_timer.used, 1);
2028 CPP_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
2029 CPP_CMD_TIMEOUT_MS, jiffies);
2030 if (mod_timer(&cpp_timer.cpp_timer,
2031 (jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS))) != 0)
2032 CPP_DBG("Timer has not expired yet\n");
2034 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
2036 pr_err("%s:%d: Scheduling payload failed %d",
2037 __func__, __LINE__, rc);
2040 msm_cpp_write(0x6, cpp_dev->base);
2042 /* send top level and plane level */
2043 for (i = 0; i < cpp_dev->payload_params.stripe_base; i++,
2045 if ((fifo_counter % MSM_CPP_RX_FIFO_LEVEL) == 0) {
2046 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
2051 msm_cpp_write(process_frame->cpp_cmd_msg[i],
2055 pr_err("%s:%d: Scheduling plane info failed %d\n",
2056 __func__, __LINE__, rc);
/* i1 = word offset of the first stripe; i2 = total stripe words
 * for [first_stripe_index, last_stripe_index] inclusive. */
2060 i1 = cpp_dev->payload_params.stripe_base +
2061 cpp_dev->payload_params.stripe_size *
2062 process_frame->first_stripe_index;
2063 i2 = cpp_dev->payload_params.stripe_size *
2064 (process_frame->last_stripe_index -
2065 process_frame->first_stripe_index + 1);
2066 for (i = 0; i < i2; i++, fifo_counter++) {
2067 if ((fifo_counter % MSM_CPP_RX_FIFO_LEVEL) == 0) {
2068 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
2073 msm_cpp_write(process_frame->cpp_cmd_msg[i+i1],
2077 pr_err("%s:%d: Scheduling stripe info failed %d\n",
2078 __func__, __LINE__, rc);
2082 if ((fifo_counter % MSM_CPP_RX_FIFO_LEVEL) == 0) {
2083 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
2085 pr_err("%s: Scheduling trailer failed %d\n",
2091 msm_cpp_write(MSM_CPP_MSG_ID_TRAILER, cpp_dev->base);
2093 do_gettimeofday(&(process_frame->in_time));
2096 pr_err("process queue full. drop frame\n");
/* Error path: undo the enqueue and disarm the timer if nothing
 * else is pending. */
2102 qcmd = msm_dequeue(&cpp_dev->processing_q, list_frame,
2105 pr_warn("%s:%d: no queue cmd\n", __func__, __LINE__);
2106 spin_lock_irqsave(&cpp_timer.data.processed_frame_lock,
2108 queue_len = cpp_dev->processing_q.len;
2109 spin_unlock_irqrestore(
2110 &cpp_timer.data.processed_frame_lock, flags);
2111 if (queue_len == 0) {
2112 atomic_set(&cpp_timer.used, 0);
2113 del_timer(&cpp_timer.cpp_timer);
/*
 * msm_cpp_send_command_to_hardware - write a raw command message to the
 * CPP FIFO.
 * @cmd_msg:      array of command words to write.
 * @payload_size: number of 32-bit words in @cmd_msg.
 *
 * Polls rx-empty before the first word and again every
 * MSM_CPP_RX_FIFO_LEVEL words to avoid overflowing the FIFO.
 */
2120 static int msm_cpp_send_command_to_hardware(struct cpp_device *cpp_dev,
2121 uint32_t *cmd_msg, uint32_t payload_size)
2126 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
2128 pr_err("%s:%d] poll rx empty failed %d",
2129 __func__, __LINE__, rc);
2133 for (i = 0; i < payload_size; i++) {
2134 msm_cpp_write(cmd_msg[i], cpp_dev->base);
2135 if (i % MSM_CPP_RX_FIFO_LEVEL == 0) {
2136 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
2138 pr_err("%s:%d] poll rx empty failed %d",
2139 __func__, __LINE__, rc);
/* msm_cpp_flush_frames - body elided in this extract (embedded line
 * numbers jump from 2148 to 2153); only the signature is visible. */
2148 static int msm_cpp_flush_frames(struct cpp_device *cpp_dev)
/*
 * msm_cpp_get_frame - copy a frame descriptor in from userspace.
 *
 * Allocates a kernel msm_cpp_frame_info_t, copies the user struct, then
 * validates msg_len (non-zero and <= MSM_CPP_MAX_FRAME_LENGTH - this
 * bound prevents a userspace-controlled oversized allocation/copy) before
 * allocating and copying the command-message array. On success the
 * frame's cpp_cmd_msg pointer is repointed at the kernel copy. Caller
 * owns and must free both allocations. Returns the frame or NULL-ish on
 * failure (cleanup labels are elided from this extract).
 */
2153 static struct msm_cpp_frame_info_t *msm_cpp_get_frame(
2154 struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
2156 uint32_t *cpp_frame_msg;
2157 struct msm_cpp_frame_info_t *new_frame = NULL;
2160 new_frame = kzalloc(sizeof(struct msm_cpp_frame_info_t), GFP_KERNEL);
2163 pr_err("Insufficient memory\n");
2168 rc = (copy_from_user(new_frame, (void __user *)ioctl_ptr->ioctl_ptr,
2169 sizeof(struct msm_cpp_frame_info_t)) ? -EFAULT : 0);
2171 ERR_COPY_FROM_USER();
/* Bounds-check the user-supplied length before using it as an
 * allocation size. */
2175 if ((new_frame->msg_len == 0) ||
2176 (new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
2177 pr_err("%s:%d: Invalid frame len:%d\n", __func__,
2178 __LINE__, new_frame->msg_len);
2182 cpp_frame_msg = kzalloc(sizeof(uint32_t) * new_frame->msg_len,
2184 if (!cpp_frame_msg) {
2185 pr_err("Insufficient memory\n");
2189 rc = (copy_from_user(cpp_frame_msg,
2190 (void __user *)new_frame->cpp_cmd_msg,
2191 sizeof(uint32_t) * new_frame->msg_len) ? -EFAULT : 0);
2193 ERR_COPY_FROM_USER();
2196 new_frame->cpp_cmd_msg = cpp_frame_msg;
2200 kfree(cpp_frame_msg);
/*
 * msm_cpp_check_buf_type - resolve output buffer indices for a frame.
 *
 * For MSM_CAMERA_BUF_MNGR_BUF_USER (batch) buffers: records the container
 * index, clamps the output count to min(user buf count, batch size,
 * MSM_OUTPUT_BUF_CNT), and copies each per-buffer index into the frame.
 * For ordinary buffers: uses slot 0 and returns 1.
 * Returns the number of output buffers (0 on the elided error paths).
 */
2207 static int msm_cpp_check_buf_type(struct msm_buf_mngr_info *buff_mgr_info,
2208 struct msm_cpp_frame_info_t *new_frame)
2210 int32_t num_output_bufs = 0;
2213 if (buff_mgr_info->type == MSM_CAMERA_BUF_MNGR_BUF_USER) {
2214 new_frame->batch_info.cont_idx =
2215 buff_mgr_info->index;
/* NOTE(review): dead store - this value is overwritten on both
 * branches of the if/else below. */
2216 num_output_bufs = buff_mgr_info->user_buf.buf_cnt;
2217 if (buff_mgr_info->user_buf.buf_cnt <
2218 new_frame->batch_info.batch_size) {
2219 /* Less bufs than Input buffer */
2220 num_output_bufs = buff_mgr_info->user_buf.buf_cnt;
2222 /* More or equal bufs as Input buffer */
2223 num_output_bufs = new_frame->batch_info.batch_size;
2225 if (num_output_bufs > MSM_OUTPUT_BUF_CNT)
2227 for (i = 0; i < num_output_bufs; i++) {
2228 new_frame->output_buffer_info[i].index =
2229 buff_mgr_info->user_buf.buf_idx[i];
2232 /* For non-group case use first buf slot */
2233 new_frame->output_buffer_info[0].index = buff_mgr_info->index;
2234 num_output_bufs = 1;
2237 return num_output_bufs;
/*
 * msm_cpp_update_frame_msg_phy_address - patch physical addresses into a
 * frame command message.
 * @in_phyaddr:          fetch-engine (input) base address.
 * @out_phyaddr0/1:      write-engine output base addresses.
 * @tnr_scratch_buffer0/1: TNR reference read/write scratch buffers.
 *
 * The userspace-built command message contains per-stripe and per-plane
 * offsets relative to each buffer; this routine ADDS the real physical
 * base addresses to those offsets in place. Per stripe it updates the
 * fetch pointer, the four write pointers, the reference read/write
 * pointers (TNR uses the scratch buffers, CDS re-reads the input), and -
 * when UBWC is enabled - the four meta-data write pointers. It then
 * updates the per-plane MMU prefetch pointer pairs. Offsets and
 * stripe/plane geometry come from cpp_dev->payload_params (device-tree
 * driven). Addresses are truncated to 32 bits for the hardware message.
 * NOTE(review): some conditional lines (e.g. the tnr_enabled test, the
 * ubwc/mmu_pf guards) are elided from this extract.
 */
2240 static void msm_cpp_update_frame_msg_phy_address(struct cpp_device *cpp_dev,
2241 struct msm_cpp_frame_info_t *new_frame, unsigned long in_phyaddr,
2242 unsigned long out_phyaddr0, unsigned long out_phyaddr1,
2243 unsigned long tnr_scratch_buffer0, unsigned long tnr_scratch_buffer1)
2245 int32_t stripe_base, plane_base;
2246 uint32_t rd_pntr_off, wr_0_pntr_off, wr_1_pntr_off,
2247 wr_2_pntr_off, wr_3_pntr_off;
2248 uint32_t wr_0_meta_data_wr_pntr_off, wr_1_meta_data_wr_pntr_off,
2249 wr_2_meta_data_wr_pntr_off, wr_3_meta_data_wr_pntr_off;
2250 uint32_t rd_ref_pntr_off, wr_ref_pntr_off;
2251 uint32_t stripe_size, plane_size;
2252 uint32_t fe_mmu_pf_ptr_off, ref_fe_mmu_pf_ptr_off, we_mmu_pf_ptr_off,
2253 dup_we_mmu_pf_ptr_off, ref_we_mmu_pf_ptr_off;
2254 uint8_t tnr_enabled, ubwc_enabled, mmu_pf_en, cds_en;
2256 uint32_t *cpp_frame_msg;
2258 cpp_frame_msg = new_frame->cpp_cmd_msg;
2260 /* Update stripe/plane size and base offsets */
2261 stripe_base = cpp_dev->payload_params.stripe_base;
2262 stripe_size = cpp_dev->payload_params.stripe_size;
2263 plane_base = cpp_dev->payload_params.plane_base;
2264 plane_size = cpp_dev->payload_params.plane_size;
2266 /* Fetch engine Offset */
2267 rd_pntr_off = cpp_dev->payload_params.rd_pntr_off;
2268 /* Write engine offsets */
2269 wr_0_pntr_off = cpp_dev->payload_params.wr_0_pntr_off;
2270 wr_1_pntr_off = wr_0_pntr_off + 1;
2271 wr_2_pntr_off = wr_1_pntr_off + 1;
2272 wr_3_pntr_off = wr_2_pntr_off + 1;
2273 /* Reference engine offsets */
2274 rd_ref_pntr_off = cpp_dev->payload_params.rd_ref_pntr_off;
2275 wr_ref_pntr_off = cpp_dev->payload_params.wr_ref_pntr_off;
2276 /* Meta data offsets */
2277 wr_0_meta_data_wr_pntr_off =
2278 cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off;
2279 wr_1_meta_data_wr_pntr_off = (wr_0_meta_data_wr_pntr_off + 1);
2280 wr_2_meta_data_wr_pntr_off = (wr_1_meta_data_wr_pntr_off + 1);
2281 wr_3_meta_data_wr_pntr_off = (wr_2_meta_data_wr_pntr_off + 1);
2282 /* MMU PF offsets */
2283 fe_mmu_pf_ptr_off = cpp_dev->payload_params.fe_mmu_pf_ptr_off;
2284 ref_fe_mmu_pf_ptr_off = cpp_dev->payload_params.ref_fe_mmu_pf_ptr_off;
2285 we_mmu_pf_ptr_off = cpp_dev->payload_params.we_mmu_pf_ptr_off;
2286 dup_we_mmu_pf_ptr_off = cpp_dev->payload_params.dup_we_mmu_pf_ptr_off;
2287 ref_we_mmu_pf_ptr_off = cpp_dev->payload_params.ref_we_mmu_pf_ptr_off;
2289 pr_debug("feature_mask 0x%x\n", new_frame->feature_mask);
2291 /* Update individual module status from feature mask */
2292 tnr_enabled = ((new_frame->feature_mask & TNR_MASK) >> 2);
2293 ubwc_enabled = ((new_frame->feature_mask & UBWC_MASK) >> 5);
2294 cds_en = ((new_frame->feature_mask & CDS_MASK) >> 6);
2295 mmu_pf_en = ((new_frame->feature_mask & MMU_PF_MASK) >> 7);
2298 * Update the stripe based addresses for fetch/write/reference engines.
2299 * Update meta data offset for ubwc.
2300 * Update ref engine address for cds / tnr.
2302 for (i = 0; i < new_frame->num_strips; i++) {
2303 cpp_frame_msg[stripe_base + rd_pntr_off + i * stripe_size] +=
2304 (uint32_t) in_phyaddr;
2305 cpp_frame_msg[stripe_base + wr_0_pntr_off + i * stripe_size] +=
2306 (uint32_t) out_phyaddr0;
2307 cpp_frame_msg[stripe_base + wr_1_pntr_off + i * stripe_size] +=
2308 (uint32_t) out_phyaddr1;
2309 cpp_frame_msg[stripe_base + wr_2_pntr_off + i * stripe_size] +=
2310 (uint32_t) out_phyaddr0;
2311 cpp_frame_msg[stripe_base + wr_3_pntr_off + i * stripe_size] +=
2312 (uint32_t) out_phyaddr1;
/* TNR reads/writes its reference through the scratch buffers. */
2314 cpp_frame_msg[stripe_base + rd_ref_pntr_off +
2316 (uint32_t)tnr_scratch_buffer0;
2317 cpp_frame_msg[stripe_base + wr_ref_pntr_off +
2319 (uint32_t)tnr_scratch_buffer1;
2320 } else if (cds_en) {
/* CDS re-reads the input buffer as its reference. */
2321 cpp_frame_msg[stripe_base + rd_ref_pntr_off +
2323 (uint32_t)in_phyaddr;
2326 cpp_frame_msg[stripe_base + wr_0_meta_data_wr_pntr_off +
2327 i * stripe_size] += (uint32_t) out_phyaddr0;
2328 cpp_frame_msg[stripe_base + wr_1_meta_data_wr_pntr_off +
2329 i * stripe_size] += (uint32_t) out_phyaddr1;
2330 cpp_frame_msg[stripe_base + wr_2_meta_data_wr_pntr_off +
2331 i * stripe_size] += (uint32_t) out_phyaddr0;
2332 cpp_frame_msg[stripe_base + wr_3_meta_data_wr_pntr_off +
2333 i * stripe_size] += (uint32_t) out_phyaddr1;
2340 /* Update mmu prefetch related plane specific address */
2341 for (i = 0; i < PAYLOAD_NUM_PLANES; i++) {
2342 cpp_frame_msg[plane_base + fe_mmu_pf_ptr_off +
2343 i * plane_size] += (uint32_t)in_phyaddr;
2344 cpp_frame_msg[plane_base + fe_mmu_pf_ptr_off +
2345 i * plane_size + 1] += (uint32_t)in_phyaddr;
2346 cpp_frame_msg[plane_base + ref_fe_mmu_pf_ptr_off +
2347 i * plane_size] += (uint32_t)tnr_scratch_buffer0;
2348 cpp_frame_msg[plane_base + ref_fe_mmu_pf_ptr_off +
2349 i * plane_size + 1] += (uint32_t)tnr_scratch_buffer0;
2350 cpp_frame_msg[plane_base + we_mmu_pf_ptr_off +
2351 i * plane_size] += (uint32_t)out_phyaddr0;
2352 cpp_frame_msg[plane_base + we_mmu_pf_ptr_off +
2353 i * plane_size + 1] += (uint32_t)out_phyaddr0;
2354 cpp_frame_msg[plane_base + dup_we_mmu_pf_ptr_off +
2355 i * plane_size] += (uint32_t)out_phyaddr1;
2356 cpp_frame_msg[plane_base + dup_we_mmu_pf_ptr_off +
2357 i * plane_size + 1] += (uint32_t)out_phyaddr1;
2358 cpp_frame_msg[plane_base + ref_we_mmu_pf_ptr_off +
2359 i * plane_size] += (uint32_t)tnr_scratch_buffer1;
2360 cpp_frame_msg[plane_base + ref_we_mmu_pf_ptr_off +
2361 i * plane_size + 1] += (uint32_t)tnr_scratch_buffer1;
/*
 * msm_cpp_set_group_buffer_duplicate() - build and send the
 * MSM_CPP_CMD_GROUP_BUFFER_DUP command for a batch frame whose
 * duplicate-output feature bit is set.
 *
 * For each additional output buffer (index 1..num_output_bufs-1) the
 * physical address is fetched and the distance from the previous
 * buffer is written into the payload (forced to 0 when UBWC is
 * disabled).  A duplication mask derived from pick_preview_idx and a
 * trailer word are appended before the command is pushed to hardware.
 *
 * NOTE(review): several lines (variable declarations, error-path
 * returns, closing braces) are elided in this extract; comments below
 * describe only the visible statements.
 */
2367 static int32_t msm_cpp_set_group_buffer_duplicate(struct cpp_device *cpp_dev,
2368 struct msm_cpp_frame_info_t *new_frame, unsigned long out_phyaddr,
2369 uint32_t num_output_bufs)
2372 uint32_t *set_group_buffer_w_duplication = NULL;
2374 unsigned long out_phyaddr0, out_phyaddr1, distance;
2376 uint32_t set_group_buffer_len, set_group_buffer_len_bytes,
2377 dup_frame_off, ubwc_enabled, j, i = 0;
2380 int iden = new_frame->identity;
/* Payload length comes from device tree; 0 means bad platform data. */
2382 set_group_buffer_len =
2383 cpp_dev->payload_params.set_group_buffer_len;
2384 if (!set_group_buffer_len) {
2385 pr_err("%s: invalid set group buffer cmd len %d\n",
2386 __func__, set_group_buffer_len);
2392 * Length of MSM_CPP_CMD_GROUP_BUFFER_DUP command +
2393 * 4 byte for header + 4 byte for the length field +
2394 * 4 byte for the trailer + 4 byte for
2395 * MSM_CPP_CMD_GROUP_BUFFER_DUP prefix before the payload
2397 set_group_buffer_len += 4;
2398 set_group_buffer_len_bytes = set_group_buffer_len *
2400 set_group_buffer_w_duplication =
2401 kzalloc(set_group_buffer_len_bytes, GFP_KERNEL);
2402 if (!set_group_buffer_w_duplication) {
2403 pr_err("%s: set group buffer data alloc failed\n",
/* NOTE(review): memset after kzalloc is redundant but harmless. */
2409 memset(set_group_buffer_w_duplication, 0x0,
2410 set_group_buffer_len_bytes);
2412 cpp_dev->payload_params.dup_frame_indicator_off;
2413 /* Add a factor of 1 as command is prefixed to the payload. */
/* UBWC enable is carried in bit 5 of the frame's feature mask. */
2415 ubwc_enabled = ((new_frame->feature_mask & UBWC_MASK) >> 5);
2416 ptr = set_group_buffer_w_duplication;
2417 /*create and send Set Group Buffer with Duplicate command*/
2418 *ptr++ = MSM_CPP_CMD_GROUP_BUFFER_DUP;
2419 *ptr++ = MSM_CPP_MSG_ID_CMD;
2421 * This field is the value read from dt and stands for length of
2422 * actual data in payload
2424 *ptr++ = cpp_dev->payload_params.set_group_buffer_len;
2425 *ptr++ = MSM_CPP_CMD_GROUP_BUFFER_DUP;
2427 out_phyaddr0 = out_phyaddr;
/* For batch-on-preview, use the duplicate identity instead. */
2429 SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(new_frame,
2430 iden, new_frame->duplicate_identity);
/* Buffer 0's address was resolved by the caller; start from 1. */
2432 for (i = 1; i < num_output_bufs; i++) {
2433 out_phyaddr1 = msm_cpp_fetch_buffer_info(cpp_dev,
2434 &new_frame->output_buffer_info[i],
2435 ((iden >> 16) & 0xFFFF),
2437 &new_frame->output_buffer_info[i].fd);
2438 if (!out_phyaddr1) {
2439 pr_err("%s: error getting o/p phy addr\n",
/* Record per-plane stride between consecutive batch buffers. */
2444 distance = out_phyaddr1 - out_phyaddr0;
2445 out_phyaddr0 = out_phyaddr1;
2446 for (j = 0; j < PAYLOAD_NUM_PLANES; j++)
/* Second set of plane words is only meaningful with UBWC. */
2449 for (j = 0; j < PAYLOAD_NUM_PLANES; j++)
2450 *ptr++ = ubwc_enabled ? distance : 0;
/* Duplication mask: one bit selecting the preview frame in the batch. */
2455 if (new_frame->duplicate_output)
2456 set_group_buffer_w_duplication[dup_frame_off] =
2457 1 << new_frame->batch_info.pick_preview_idx;
2459 set_group_buffer_w_duplication[dup_frame_off] = 0;
2462 * Index for cpp message id trailer is length of payload for
2463 * set group buffer minus 1
2465 set_group_buffer_w_duplication[set_group_buffer_len - 1] =
2466 MSM_CPP_MSG_ID_TRAILER;
2467 rc = msm_cpp_send_command_to_hardware(cpp_dev,
2468 set_group_buffer_w_duplication, set_group_buffer_len);
2470 pr_err("%s: Send Command Error rc %d\n", __func__, rc);
/* Payload is consumed by hardware; always free the local copy. */
2476 kfree(set_group_buffer_w_duplication);
/*
 * msm_cpp_set_group_buffer() - build and send the plain
 * MSM_CPP_CMD_GROUP_BUFFER command for a batch frame.
 *
 * Validates that batching is applicable (user batch-on-preview or
 * video batch mode, batch size > 1), then either delegates to the
 * duplicate variant (when the BATCH_DUP feature bit is set) or emits
 * one distance word per extra output buffer.
 *
 * NOTE(review): error-path returns and closing braces are elided in
 * this extract; comments describe only the visible statements.
 */
2480 static int32_t msm_cpp_set_group_buffer(struct cpp_device *cpp_dev,
2481 struct msm_cpp_frame_info_t *new_frame, unsigned long out_phyaddr,
2482 uint32_t num_output_bufs)
2484 uint32_t set_group_buffer_len;
2485 uint32_t *set_group_buffer = NULL;
2487 unsigned long out_phyaddr0, out_phyaddr1, distance;
2489 uint32_t set_group_buffer_len_bytes, i = 0;
2490 bool batching_valid = false;
/* Group buffers only apply to preview-batch or video-batch frames. */
2492 if ((IS_BATCH_BUFFER_ON_PREVIEW(new_frame)) ||
2493 new_frame->batch_info.batch_mode == BATCH_MODE_VIDEO)
2494 batching_valid = true;
2496 if (!batching_valid) {
2497 pr_debug("%s: batch mode %d, batching valid %d\n",
2498 __func__, new_frame->batch_info.batch_mode,
/* A batch of one (or zero) buffers needs no group-buffer command. */
2503 if (new_frame->batch_info.batch_size <= 1) {
2504 pr_debug("%s: batch size is invalid %d\n", __func__,
2505 new_frame->batch_info.batch_size);
/* Bit 8 of the feature mask selects the duplicate-output variant. */
2509 if ((new_frame->feature_mask & BATCH_DUP_MASK) >> 8) {
2510 return msm_cpp_set_group_buffer_duplicate(cpp_dev, new_frame,
2511 out_phyaddr, num_output_bufs);
/* duplicate_output without the BATCH_DUP bit is unsupported here. */
2514 if (new_frame->duplicate_output) {
2515 pr_err("cannot support duplication enable\n");
/* 2 header words + 3 words per additional output buffer. */
2520 set_group_buffer_len =
2521 2 + 3 * (num_output_bufs - 1);
2523 * Length of MSM_CPP_CMD_GROUP_BUFFER command +
2524 * 4 byte for header + 4 byte for the length field +
2525 * 4 byte for the trailer + 4 byte for
2526 * MSM_CPP_CMD_GROUP_BUFFER prefix before the payload
2528 set_group_buffer_len += 4;
2529 set_group_buffer_len_bytes = set_group_buffer_len *
2532 kzalloc(set_group_buffer_len_bytes, GFP_KERNEL);
2533 if (!set_group_buffer) {
2534 pr_err("%s: set group buffer data alloc failed\n",
/* NOTE(review): memset after kzalloc is redundant but harmless. */
2540 memset(set_group_buffer, 0x0,
2541 set_group_buffer_len_bytes);
2542 ptr = set_group_buffer;
2543 /*Create and send Set Group Buffer*/
2544 *ptr++ = MSM_CPP_CMD_GROUP_BUFFER;
2545 *ptr++ = MSM_CPP_MSG_ID_CMD;
2547 * This field is the value read from dt and stands
2548 * for length of actual data in payload
2550 *ptr++ = set_group_buffer_len - 4;
2551 *ptr++ = MSM_CPP_CMD_GROUP_BUFFER;
2553 out_phyaddr0 = out_phyaddr;
/* Buffer 0 was resolved by the caller; resolve the rest here. */
2555 for (i = 1; i < num_output_bufs; i++) {
2557 msm_cpp_fetch_buffer_info(cpp_dev,
2558 &new_frame->output_buffer_info[i],
2559 ((new_frame->identity >> 16) & 0xFFFF),
2560 (new_frame->identity & 0xFFFF),
2561 &new_frame->output_buffer_info[i].fd);
2562 if (!out_phyaddr1) {
2563 pr_err("%s: error getting o/p phy addr\n",
/* Distance between consecutive batch buffers goes in the payload. */
2568 distance = out_phyaddr1 - out_phyaddr0;
2569 out_phyaddr0 = out_phyaddr1;
2578 * Index for cpp message id trailer is length of
2579 * payload for set group buffer minus 1
2581 set_group_buffer[set_group_buffer_len - 1] =
2582 MSM_CPP_MSG_ID_TRAILER;
2583 rc = msm_cpp_send_command_to_hardware(cpp_dev,
2584 set_group_buffer, set_group_buffer_len);
2586 pr_err("Send Command Error rc %d\n", rc);
/* Payload is consumed by hardware; always free the local copy. */
2589 kfree(set_group_buffer);
/*
 * msm_cpp_cfg_frame() - validate a userspace frame message, resolve
 * all buffer physical addresses, patch them into the firmware payload
 * and queue the frame to the CPP hardware.
 *
 * Validation covers: device state, message length/trailer, stripe
 * count/index sanity (including integer-overflow guards on
 * stripe_base + num_strips * stripe_size), IOMMU attach state and
 * page-fault status.  Output (and optionally duplicate) buffers are
 * obtained from the buffer manager, TNR scratch buffers are resolved
 * when the TNR feature bit is set, and group-buffer commands are
 * emitted for batch frames.
 *
 * NOTE(review): many lines (error-path gotos/returns, else branches,
 * closing braces) are elided in this extract; comments describe only
 * the visible statements.
 */
2594 static int msm_cpp_cfg_frame(struct cpp_device *cpp_dev,
2595 struct msm_cpp_frame_info_t *new_frame)
2598 struct msm_queue_cmd *frame_qcmd = NULL;
2599 uint32_t *cpp_frame_msg;
2600 unsigned long in_phyaddr, out_phyaddr0 = (unsigned long)NULL;
2601 unsigned long out_phyaddr1;
2602 unsigned long tnr_scratch_buffer0, tnr_scratch_buffer1;
2603 uint16_t num_stripes = 0;
2604 struct msm_buf_mngr_info buff_mgr_info, dup_buff_mgr_info;
2606 int32_t num_output_bufs = 1;
2607 uint32_t stripe_base = 0;
2608 uint32_t stripe_size;
2609 uint8_t tnr_enabled;
2610 enum msm_camera_buf_mngr_buf_type buf_type =
2611 MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
2612 uint32_t ioctl_cmd, idx;
2613 uint32_t op_index, dup_index;
/* Stripe layout parameters come from per-SoC device-tree data. */
2615 stripe_base = cpp_dev->payload_params.stripe_base;
2616 stripe_size = cpp_dev->payload_params.stripe_size;
2619 pr_err("%s: Frame is Null\n", __func__);
2623 if (cpp_dev->state == CPP_STATE_OFF) {
2624 pr_err("%s: cpp state is off, return fatal error\n", __func__);
2628 cpp_frame_msg = new_frame->cpp_cmd_msg;
2630 if (cpp_frame_msg == NULL ||
2631 (new_frame->msg_len < MSM_CPP_MIN_FRAME_LENGTH)) {
2632 pr_err("Length is not correct or frame message is missing\n");
/* Refuse (or drop) frames while an IOMMU page fault is outstanding. */
2636 if (cpp_dev->fault_status == CPP_IOMMU_FAULT_RECOVERED) {
2637 pr_err("Error, page fault occurred %d\n",
2638 cpp_dev->fault_status);
2640 } else if (cpp_dev->fault_status == CPP_IOMMU_FAULT_DETECTED) {
2641 pr_err("drop frame, page fault occurred %d\n",
2642 cpp_dev->fault_status);
/* A well-formed message always ends with the trailer marker. */
2646 if (cpp_frame_msg[new_frame->msg_len - 1] !=
2647 MSM_CPP_MSG_ID_TRAILER) {
2648 pr_err("Invalid frame message\n");
2652 /* Stripe index starts at zero */
2653 if ((!new_frame->num_strips) ||
2654 (new_frame->first_stripe_index >= new_frame->num_strips) ||
2655 (new_frame->last_stripe_index >= new_frame->num_strips) ||
2656 (new_frame->first_stripe_index >
2657 new_frame->last_stripe_index)) {
2658 pr_err("Invalid frame message, #stripes=%d, stripe indices=[%d,%d]\n",
2659 new_frame->num_strips,
2660 new_frame->first_stripe_index,
2661 new_frame->last_stripe_index);
2666 pr_err("Invalid frame message, invalid stripe_size (%d)!\n",
/* Guard against 32-bit overflow in the expected-length computation. */
2671 if ((stripe_base == UINT_MAX) ||
2672 (new_frame->num_strips >
2673 (UINT_MAX - 1 - stripe_base) / stripe_size)) {
2674 pr_err("Invalid frame message, num_strips %d is large\n",
2675 new_frame->num_strips);
/* Message length must exactly match the declared stripe layout. */
2679 if ((stripe_base + new_frame->num_strips * stripe_size + 1) !=
2680 new_frame->msg_len) {
2681 pr_err("Invalid frame message,len=%d,expected=%d\n",
2684 new_frame->num_strips * stripe_size + 1));
2688 if (cpp_dev->iommu_state != CPP_IOMMU_STATE_ATTACHED) {
2689 pr_err("IOMMU is not attached\n");
/* Resolve the input buffer to an IOMMU-mapped physical address. */
2693 in_phyaddr = msm_cpp_fetch_buffer_info(cpp_dev,
2694 &new_frame->input_buffer_info,
2695 ((new_frame->input_buffer_info.identity >> 16) & 0xFFFF),
2696 (new_frame->input_buffer_info.identity & 0xFFFF), &in_fd);
2698 pr_err("%s: error gettting input physical address\n", __func__);
2703 op_index = new_frame->output_buffer_info[0].index;
2704 dup_index = new_frame->duplicate_buffer_info.index;
/* we_disable == 0 means the write engine is active: need an output. */
2706 if (new_frame->we_disable == 0) {
2707 int32_t iden = new_frame->identity;
2709 if ((new_frame->output_buffer_info[0].native_buff == 0) &&
2710 (new_frame->first_payload)) {
2711 memset(&buff_mgr_info, 0,
2712 sizeof(struct msm_buf_mngr_info));
2713 if ((new_frame->batch_info.batch_mode ==
2714 BATCH_MODE_VIDEO) ||
2715 (IS_BATCH_BUFFER_ON_PREVIEW(new_frame)))
2716 buf_type = MSM_CAMERA_BUF_MNGR_BUF_USER;
2718 SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(new_frame,
2719 iden, new_frame->duplicate_identity);
2722 * Swap the input buffer index for batch mode with
2725 SWAP_BUF_INDEX_FOR_BATCH_ON_PREVIEW(new_frame,
2726 buff_mgr_info, op_index, dup_index);
2728 buff_mgr_info.session_id = ((iden >> 16) & 0xFFFF);
2729 buff_mgr_info.stream_id = (iden & 0xFFFF);
2730 buff_mgr_info.type = buf_type;
/* Default index: let the buffer manager pick; else get by index. */
2732 if (IS_DEFAULT_OUTPUT_BUF_INDEX(buff_mgr_info.index)) {
2733 ioctl_cmd = VIDIOC_MSM_BUF_MNGR_GET_BUF;
2736 ioctl_cmd = VIDIOC_MSM_BUF_MNGR_IOCTL_CMD;
2738 MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX;
2740 rc = msm_cpp_buffer_ops(cpp_dev,
2741 ioctl_cmd, idx, &buff_mgr_info);
2744 pr_debug("%s:get_buf err rc:%d, index %d\n",
2746 new_frame->output_buffer_info[0].index);
2750 msm_cpp_check_buf_type(&buff_mgr_info,
2752 if (!num_output_bufs) {
2753 pr_err("%s: error getting buffer %d\n",
2754 __func__, num_output_bufs);
2760 out_phyaddr0 = msm_cpp_fetch_buffer_info(cpp_dev,
2761 &new_frame->output_buffer_info[0],
2762 ((iden >> 16) & 0xFFFF),
2764 &new_frame->output_buffer_info[0].fd);
2765 if (!out_phyaddr0) {
2766 pr_err("%s: error gettting output physical address\n",
2772 out_phyaddr1 = out_phyaddr0;
2774 /* get buffer for duplicate output */
2775 if (new_frame->duplicate_output) {
2776 int32_t iden = new_frame->duplicate_identity;
2778 CPP_DBG("duplication enabled, dup_id=0x%x",
2779 new_frame->duplicate_identity);
2781 SWAP_IDENTITY_FOR_BATCH_ON_PREVIEW(new_frame,
2782 iden, new_frame->identity);
2784 memset(&dup_buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
2787 * Swap the input buffer index for batch mode with
2790 SWAP_BUF_INDEX_FOR_BATCH_ON_PREVIEW(new_frame,
2791 dup_buff_mgr_info, dup_index, op_index);
2793 dup_buff_mgr_info.session_id = ((iden >> 16) & 0xFFFF);
2794 dup_buff_mgr_info.stream_id = (iden & 0xFFFF);
2795 dup_buff_mgr_info.type =
2796 MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
2797 if (IS_DEFAULT_OUTPUT_BUF_INDEX(dup_buff_mgr_info.index)) {
2798 ioctl_cmd = VIDIOC_MSM_BUF_MNGR_GET_BUF;
2801 ioctl_cmd = VIDIOC_MSM_BUF_MNGR_IOCTL_CMD;
2802 idx = MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX;
2804 rc = msm_cpp_buffer_ops(cpp_dev, ioctl_cmd, idx,
2805 &dup_buff_mgr_info);
2808 pr_debug("%s: get_buf err rc:%d, index %d\n",
2810 new_frame->duplicate_buffer_info.index);
2813 new_frame->duplicate_buffer_info.index =
2814 dup_buff_mgr_info.index;
2815 out_phyaddr1 = msm_cpp_fetch_buffer_info(cpp_dev,
2816 &new_frame->duplicate_buffer_info,
2817 ((iden >> 16) & 0xFFFF),
2819 &new_frame->duplicate_buffer_info.fd);
2820 if (!out_phyaddr1) {
2821 pr_err("error gettting output physical address\n");
/* Return the duplicate buffer on failure to avoid leaking it. */
2823 msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
2824 0x0, &dup_buff_mgr_info);
2827 /* set duplicate enable bit */
2828 cpp_frame_msg[5] |= 0x1;
2829 CPP_DBG("out_phyaddr1= %08x\n", (uint32_t)out_phyaddr1);
/* TNR enable is carried in bit 2 of the frame's feature mask. */
2832 tnr_enabled = ((new_frame->feature_mask & TNR_MASK) >> 2);
2834 tnr_scratch_buffer0 = msm_cpp_fetch_buffer_info(cpp_dev,
2835 &new_frame->tnr_scratch_buffer_info[0],
2836 ((new_frame->identity >> 16) & 0xFFFF),
2837 (new_frame->identity & 0xFFFF),
2838 &new_frame->tnr_scratch_buffer_info[0].fd);
2839 if (!tnr_scratch_buffer0) {
2840 pr_err("error getting scratch buffer physical address\n");
2845 tnr_scratch_buffer1 = msm_cpp_fetch_buffer_info(cpp_dev,
2846 &new_frame->tnr_scratch_buffer_info[1],
2847 ((new_frame->identity >> 16) & 0xFFFF),
2848 (new_frame->identity & 0xFFFF),
2849 &new_frame->tnr_scratch_buffer_info[1].fd);
2850 if (!tnr_scratch_buffer1) {
2851 pr_err("error getting scratch buffer physical address\n");
2856 tnr_scratch_buffer0 = 0;
2857 tnr_scratch_buffer1 = 0;
/* Patch every stripe/plane pointer in the payload with real addrs. */
2861 msm_cpp_update_frame_msg_phy_address(cpp_dev, new_frame,
2862 in_phyaddr, out_phyaddr0, out_phyaddr1,
2863 tnr_scratch_buffer0, tnr_scratch_buffer1);
/* Word 10 carries the gap between the two TNR scratch buffers. */
2865 cpp_frame_msg[10] = tnr_scratch_buffer1 -
2866 tnr_scratch_buffer0;
2869 rc = msm_cpp_set_group_buffer(cpp_dev, new_frame, out_phyaddr0,
2872 pr_err("%s: set group buffer failure %d\n", __func__, rc);
/* Word 1 is the effective message length for the stripes submitted. */
2876 num_stripes = new_frame->last_stripe_index -
2877 new_frame->first_stripe_index + 1;
2878 cpp_frame_msg[1] = stripe_base - 2 + num_stripes * stripe_size;
2880 frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
2882 pr_err("%s: Insufficient memory\n", __func__);
2887 atomic_set(&frame_qcmd->on_heap, 1);
2888 frame_qcmd->command = new_frame;
2889 rc = msm_cpp_send_frame_to_hardware(cpp_dev, frame_qcmd);
2891 pr_err("%s: error cannot send frame to hardware\n", __func__);
/* Error path: give back the managed output buffer if we took one. */
2900 if (new_frame->output_buffer_info[0].native_buff == 0)
2901 msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
2902 0x0, &buff_mgr_info);
2904 kfree(cpp_frame_msg);
/*
 * msm_cpp_cfg() - VIDIOC_MSM_CPP_CFG handler: copy the frame info from
 * userspace, build a kernel frame via msm_cpp_get_frame(), configure
 * it with msm_cpp_cfg_frame() and copy the (possibly updated) output
 * buffer info and status back to userspace.
 *
 * NOTE(review): error-path returns and closing braces are elided in
 * this extract; comments describe only the visible statements.
 */
2909 static int msm_cpp_cfg(struct cpp_device *cpp_dev,
2910 struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
2912 struct msm_cpp_frame_info_t *frame = NULL;
2913 struct msm_cpp_frame_info_t k_frame_info;
2916 uint32_t num_buff = sizeof(k_frame_info.output_buffer_info) /
2917 sizeof(struct msm_cpp_buffer_info_t);
2919 if (copy_from_user(&k_frame_info,
2920 (void __user *)ioctl_ptr->ioctl_ptr,
2921 sizeof(k_frame_info)))
2924 frame = msm_cpp_get_frame(ioctl_ptr);
2926 pr_err("%s: Error allocating frame\n", __func__);
2929 rc = msm_cpp_cfg_frame(cpp_dev, frame);
/* Reflect buffer indices/fds resolved during cfg back to the user. */
2931 for (i = 0; i < num_buff; i++) {
2932 k_frame_info.output_buffer_info[i] =
2933 frame->output_buffer_info[i];
2938 ioctl_ptr->trans_code = rc;
/* Status pointer inside the user struct receives the return code. */
2940 if (copy_to_user((void __user *)k_frame_info.status, &rc,
2942 pr_err("error cannot copy error\n");
2945 if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
2946 &k_frame_info, sizeof(k_frame_info))) {
2947 pr_err("Error: cannot copy k_frame_info");
/*
 * msm_cpp_clean_queue() - drain the processing queue, freeing each
 * queued frame's command message and frame struct.  Used when the
 * stream is (re)started or shut down so no stale frames remain.
 */
2954 void msm_cpp_clean_queue(struct cpp_device *cpp_dev)
2956 struct msm_queue_cmd *frame_qcmd = NULL;
2957 struct msm_cpp_frame_info_t *processed_frame = NULL;
2958 struct msm_device_queue *queue = NULL;
2960 while (cpp_dev->processing_q.len) {
2961 pr_debug("queue len:%d\n", cpp_dev->processing_q.len);
2962 queue = &cpp_dev->processing_q;
2963 frame_qcmd = msm_dequeue(queue, list_frame, POP_FRONT);
2965 processed_frame = frame_qcmd->command;
/* Free the payload first, then the frame that owns it. */
2967 if (processed_frame)
2968 kfree(processed_frame->cpp_cmd_msg);
2969 kfree(processed_frame);
2974 #ifdef CONFIG_COMPAT
/*
 * msm_cpp_copy_from_ioctl_ptr() - CONFIG_COMPAT variant.  Copies
 * ioctl payload into dst_ptr; for a compat (32-bit) task the pointer
 * has already been translated into kernel space by the compat layer,
 * so a plain memcpy is used instead of copy_from_user().
 * Returns 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
2975 static int msm_cpp_copy_from_ioctl_ptr(void *dst_ptr,
2976 struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
2980 if ((ioctl_ptr->ioctl_ptr == NULL) || (ioctl_ptr->len == 0)) {
2981 pr_err("%s: Wrong ioctl_ptr %pK / len %zu\n", __func__,
2982 ioctl_ptr, ioctl_ptr->len);
2986 /* For compat task, source ptr is in kernel space */
2987 if (is_compat_task()) {
2988 memcpy(dst_ptr, ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
2991 ret = copy_from_user(dst_ptr,
2992 (void __user *)ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
2994 pr_err("Copy from user fail %d\n", ret);
2996 return ret ? -EFAULT : 0;
/*
 * msm_cpp_copy_from_ioctl_ptr() - non-compat variant: validate the
 * ioctl pointer/length and copy the payload from user space.
 * Returns 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
2999 static int msm_cpp_copy_from_ioctl_ptr(void *dst_ptr,
3000 struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
3004 if ((ioctl_ptr->ioctl_ptr == NULL) || (ioctl_ptr->len == 0)) {
3005 pr_err("%s: Wrong ioctl_ptr %pK / len %zu\n", __func__,
3006 ioctl_ptr, ioctl_ptr->len);
3010 ret = copy_from_user(dst_ptr,
3011 (void __user *)ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
3013 pr_err("Copy from user fail %d\n", ret);
3015 return ret ? -EFAULT : 0;
/*
 * msm_cpp_fw_version() - query the firmware version over the CPP
 * message interface.  Sends the GET_FW_VER command sequence, polls for
 * the expected reply words, stores the version in
 * cpp_dev->fw_version and validates the trailing marker.
 *
 * NOTE(review): error returns after each failed poll are elided in
 * this extract.
 */
3019 static int32_t msm_cpp_fw_version(struct cpp_device *cpp_dev)
/* Ensure the RX FIFO is drained before issuing the command. */
3023 rc = msm_cpp_poll_rx_empty(cpp_dev->base);
3025 pr_err("%s:%d] poll rx empty failed %d",
3026 __func__, __LINE__, rc);
3029 /*Get Firmware Version*/
3030 msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
3031 msm_cpp_write(MSM_CPP_MSG_ID_CMD, cpp_dev->base);
3032 msm_cpp_write(0x1, cpp_dev->base);
3033 msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
3034 msm_cpp_write(MSM_CPP_MSG_ID_TRAILER, cpp_dev->base);
/* Reply sequence: CMD id, length word 0x2, FW_VER id, version, trailer. */
3036 rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
3038 pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
3039 MSM_CPP_MSG_ID_CMD, rc);
3042 rc = msm_cpp_poll(cpp_dev->base, 0x2);
3044 pr_err("%s:%d] poll command 0x2 failed %d", __func__, __LINE__,
3048 rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_FW_VER);
3050 pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
3051 MSM_CPP_MSG_ID_FW_VER, rc);
3055 cpp_dev->fw_version = msm_cpp_read(cpp_dev->base);
3056 pr_debug("CPP FW Version: 0x%08x\n", cpp_dev->fw_version);
3058 rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
3060 pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
3061 MSM_CPP_MSG_ID_TRAILER, rc);
/*
 * msm_cpp_validate_input() - common argument validation for subdev
 * ioctls.  Freeze/shutdown commands carry no payload and are accepted
 * as-is; every other command must supply a non-NULL
 * msm_camera_v4l2_ioctl_t with a non-NULL payload pointer and
 * non-zero length.
 *
 * NOTE(review): the switch header, returns and closing braces are
 * elided in this extract.
 */
3069 static int msm_cpp_validate_input(unsigned int cmd, void *arg,
3070 struct msm_camera_v4l2_ioctl_t **ioctl_ptr)
/* These commands legitimately carry no ioctl payload. */
3073 case MSM_SD_SHUTDOWN:
3074 case MSM_SD_NOTIFY_FREEZE:
3075 case MSM_SD_UNNOTIFY_FREEZE:
3078 if (ioctl_ptr == NULL) {
3079 pr_err("Wrong ioctl_ptr for cmd %u\n", cmd);
3084 if (((*ioctl_ptr) == NULL) ||
3085 ((*ioctl_ptr)->ioctl_ptr == NULL) ||
3086 ((*ioctl_ptr)->len == 0)) {
3087 pr_err("Error invalid ioctl argument cmd %u", cmd);
/*
 * cpp_cx_ipeak_update() - set the CPP core clock while managing the
 * CX iPeak turbo vote.  Requesting the top (turbo) frequency first
 * casts an iPeak vote; if voting fails the clock is capped at the
 * next-lower frequency.  Dropping below turbo removes the vote.
 * Returns the clock rate actually set (0 on the visible init path).
 */
3096 unsigned long cpp_cx_ipeak_update(struct cpp_device *cpp_dev,
3097 unsigned long clock, int idx)
3099 unsigned long clock_rate = 0;
/* freq_tbl[count-1] is the highest (turbo) frequency. */
3102 if ((clock >= cpp_dev->hw_info.freq_tbl
3103 [(cpp_dev->hw_info.freq_tbl_count) - 1]) &&
3104 (cpp_dev->turbo_vote == 0)) {
3105 ret = cx_ipeak_update(cpp_dev->cpp_cx_ipeak, true);
3107 pr_err("cx_ipeak voting failed setting clock below turbo");
/* Vote failed: fall back to the second-highest frequency. */
3108 clock = cpp_dev->hw_info.freq_tbl
3109 [(cpp_dev->hw_info.freq_tbl_count) - 2];
3111 cpp_dev->turbo_vote = 1;
3113 clock_rate = msm_cpp_set_core_clk(cpp_dev, clock, idx);
3114 } else if (clock < cpp_dev->hw_info.freq_tbl
3115 [(cpp_dev->hw_info.freq_tbl_count) - 1]) {
/* Below turbo: set the clock, then release any held turbo vote. */
3116 clock_rate = msm_cpp_set_core_clk(cpp_dev, clock, idx);
3117 if (cpp_dev->turbo_vote == 1) {
3118 ret = cx_ipeak_update(cpp_dev->cpp_cx_ipeak, false);
3120 pr_err("cx_ipeak unvoting failed");
3122 cpp_dev->turbo_vote = 0;
/*
 * msm_cpp_subdev_ioctl() - main v4l2 subdev ioctl dispatcher for the
 * CPP node.  Validates arguments, takes cpp_dev->mutex and dispatches
 * on cmd: hw-info query, firmware load, frame config, queue flush,
 * stream buffer enqueue/dequeue/delete, event payload fetch, clock
 * setting, freeze/shutdown, buffer queueing and IOMMU attach/detach.
 * The mutex is released on every visible exit path before returning.
 *
 * NOTE(review): many lines (switch headers, returns, else branches,
 * closing braces) are elided in this extract; comments describe only
 * the visible statements.
 */
3128 long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
3129 unsigned int cmd, void *arg)
3131 struct cpp_device *cpp_dev = NULL;
3132 struct msm_camera_v4l2_ioctl_t *ioctl_ptr = NULL;
3136 pr_err("sd %pK\n", sd);
3139 cpp_dev = v4l2_get_subdevdata(sd);
3140 if (cpp_dev == NULL) {
3141 pr_err("cpp_dev is null\n");
3145 if (_IOC_DIR(cmd) == _IOC_NONE) {
3146 pr_err("Invalid ioctl/subdev cmd %u", cmd);
3150 rc = msm_cpp_validate_input(cmd, arg, &ioctl_ptr);
3152 pr_err("input validation failed\n");
/* All command handling below runs under the device mutex. */
3155 mutex_lock(&cpp_dev->mutex);
3157 CPP_DBG("E cmd: 0x%x\n", cmd);
3159 case VIDIOC_MSM_CPP_GET_HW_INFO: {
3160 CPP_DBG("VIDIOC_MSM_CPP_GET_HW_INFO\n");
3161 if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
3163 sizeof(struct cpp_hw_info))) {
3164 mutex_unlock(&cpp_dev->mutex);
3170 case VIDIOC_MSM_CPP_LOAD_FIRMWARE: {
3171 CPP_DBG("VIDIOC_MSM_CPP_LOAD_FIRMWARE\n");
3172 if (cpp_dev->is_firmware_loaded == 0) {
/* Release any previously requested firmware name/blob first. */
3173 if (cpp_dev->fw_name_bin != NULL) {
3174 kfree(cpp_dev->fw_name_bin);
3175 cpp_dev->fw_name_bin = NULL;
3178 release_firmware(cpp_dev->fw);
/* Firmware name length is bounded to prevent oversized allocs. */
3181 if ((ioctl_ptr->len == 0) ||
3182 (ioctl_ptr->len > MSM_CPP_MAX_FW_NAME_LEN)) {
3183 pr_err("ioctl_ptr->len is 0\n");
3184 mutex_unlock(&cpp_dev->mutex);
3187 cpp_dev->fw_name_bin = kzalloc(ioctl_ptr->len+1,
3189 if (!cpp_dev->fw_name_bin) {
3190 pr_err("%s:%d: malloc error\n", __func__,
3192 mutex_unlock(&cpp_dev->mutex);
3195 if (ioctl_ptr->ioctl_ptr == NULL) {
3196 pr_err("ioctl_ptr->ioctl_ptr=NULL\n");
3197 kfree(cpp_dev->fw_name_bin);
3198 cpp_dev->fw_name_bin = NULL;
3199 mutex_unlock(&cpp_dev->mutex);
3202 rc = (copy_from_user(cpp_dev->fw_name_bin,
3203 (void __user *)ioctl_ptr->ioctl_ptr,
3204 ioctl_ptr->len) ? -EFAULT : 0);
3206 ERR_COPY_FROM_USER();
3207 kfree(cpp_dev->fw_name_bin);
3208 cpp_dev->fw_name_bin = NULL;
3209 mutex_unlock(&cpp_dev->mutex);
/* Explicitly NUL-terminate the user-supplied firmware name. */
3212 *(cpp_dev->fw_name_bin+ioctl_ptr->len) = '\0';
3213 rc = request_firmware(&cpp_dev->fw,
3214 cpp_dev->fw_name_bin,
3215 &cpp_dev->pdev->dev);
3217 dev_err(&cpp_dev->pdev->dev,
3218 "Fail to loc blob %s dev %pK, rc:%d\n",
3219 cpp_dev->fw_name_bin,
3220 &cpp_dev->pdev->dev, rc);
3221 kfree(cpp_dev->fw_name_bin);
3222 cpp_dev->fw_name_bin = NULL;
3224 mutex_unlock(&cpp_dev->mutex);
/* IRQs are masked while the micro-controller is reloaded. */
3227 msm_camera_enable_irq(cpp_dev->irq, false);
3228 rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
3230 pr_err("%s: load firmware failure %d-retry\n",
3232 rc = msm_cpp_reset_vbif_and_load_fw(cpp_dev);
3234 enable_irq(cpp_dev->irq->start);
3235 mutex_unlock(&cpp_dev->mutex);
3239 rc = msm_cpp_fw_version(cpp_dev);
3241 pr_err("%s: get firmware failure %d\n",
3243 enable_irq(cpp_dev->irq->start);
3244 mutex_unlock(&cpp_dev->mutex);
3247 msm_camera_enable_irq(cpp_dev->irq, true);
3248 cpp_dev->is_firmware_loaded = 1;
3252 case VIDIOC_MSM_CPP_CFG:
3253 CPP_DBG("VIDIOC_MSM_CPP_CFG\n");
3254 rc = msm_cpp_cfg(cpp_dev, ioctl_ptr);
3256 case VIDIOC_MSM_CPP_FLUSH_QUEUE:
3257 CPP_DBG("VIDIOC_MSM_CPP_FLUSH_QUEUE\n");
3258 rc = msm_cpp_flush_frames(cpp_dev);
3260 case VIDIOC_MSM_CPP_DELETE_STREAM_BUFF:
3261 case VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO:
3262 case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO: {
3264 struct msm_cpp_stream_buff_info_t *u_stream_buff_info = NULL;
3265 struct msm_cpp_stream_buff_info_t k_stream_buff_info;
3266 struct msm_cpp_buff_queue_info_t *buff_queue_info = NULL;
3268 memset(&k_stream_buff_info, 0, sizeof(k_stream_buff_info));
3269 CPP_DBG("VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO\n");
/* Payload must be exactly one msm_cpp_stream_buff_info_t. */
3270 if (sizeof(struct msm_cpp_stream_buff_info_t) !=
3272 pr_err("%s:%d: invalid length\n", __func__, __LINE__);
3273 mutex_unlock(&cpp_dev->mutex);
3276 u_stream_buff_info = kzalloc(ioctl_ptr->len, GFP_KERNEL);
3277 if (!u_stream_buff_info) {
3278 pr_err("%s:%d: malloc error\n", __func__, __LINE__);
3279 mutex_unlock(&cpp_dev->mutex);
3283 rc = msm_cpp_copy_from_ioctl_ptr(u_stream_buff_info,
3286 ERR_COPY_FROM_USER();
3287 kfree(u_stream_buff_info);
3288 mutex_unlock(&cpp_dev->mutex);
3291 k_stream_buff_info.num_buffs = u_stream_buff_info->num_buffs;
3292 k_stream_buff_info.identity = u_stream_buff_info->identity;
/* Cap the per-stream buffer count before sizing the copy. */
3294 if (k_stream_buff_info.num_buffs > MSM_CAMERA_MAX_STREAM_BUF) {
3295 pr_err("%s:%d: unexpected large num buff requested\n",
3296 __func__, __LINE__);
3297 kfree(u_stream_buff_info);
3298 mutex_unlock(&cpp_dev->mutex);
3302 if (u_stream_buff_info->num_buffs != 0) {
3303 k_stream_buff_info.buffer_info =
3304 kzalloc(k_stream_buff_info.num_buffs *
3305 sizeof(struct msm_cpp_buffer_info_t),
3307 if (ZERO_OR_NULL_PTR(k_stream_buff_info.buffer_info)) {
3308 pr_err("%s:%d: malloc error\n",
3309 __func__, __LINE__);
3310 kfree(u_stream_buff_info);
3311 mutex_unlock(&cpp_dev->mutex);
3315 rc = (copy_from_user(k_stream_buff_info.buffer_info,
3316 (void __user *)u_stream_buff_info->buffer_info,
3317 k_stream_buff_info.num_buffs *
3318 sizeof(struct msm_cpp_buffer_info_t)) ?
3321 ERR_COPY_FROM_USER();
3322 kfree(k_stream_buff_info.buffer_info);
3323 kfree(u_stream_buff_info);
3324 mutex_unlock(&cpp_dev->mutex);
3329 buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
3330 (k_stream_buff_info.identity >> 16) & 0xFFFF,
3331 k_stream_buff_info.identity & 0xFFFF);
/* First use of this identity: create the stream's buffer queue. */
3333 if (buff_queue_info == NULL) {
3334 if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF)
3335 goto STREAM_BUFF_END;
3337 rc = msm_cpp_add_buff_queue_entry(cpp_dev,
3338 ((k_stream_buff_info.identity >> 16) & 0xFFFF),
3339 (k_stream_buff_info.identity & 0xFFFF));
3342 goto STREAM_BUFF_END;
3344 if (cpp_dev->stream_cnt == 0) {
3345 cpp_dev->state = CPP_STATE_ACTIVE;
3346 msm_cpp_clear_timer(cpp_dev);
3347 msm_cpp_clean_queue(cpp_dev);
3349 cpp_dev->stream_cnt++;
3350 CPP_DBG("stream_cnt:%d\n", cpp_dev->stream_cnt);
3352 buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
3353 ((k_stream_buff_info.identity >> 16) & 0xFFFF),
3354 (k_stream_buff_info.identity & 0xFFFF));
3355 if (buff_queue_info == NULL) {
3356 pr_err("error finding buffer queue entry identity:%d\n",
3357 k_stream_buff_info.identity);
3358 kfree(k_stream_buff_info.buffer_info);
3359 kfree(u_stream_buff_info);
3360 cpp_dev->stream_cnt--;
3361 mutex_unlock(&cpp_dev->mutex);
/* DELETE removes the listed buffers; others enqueue them. */
3364 if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF) {
3365 for (j = 0; j < k_stream_buff_info.num_buffs; j++) {
3366 msm_cpp_dequeue_buff(cpp_dev, buff_queue_info,
3367 k_stream_buff_info.buffer_info[j].index,
3368 k_stream_buff_info.buffer_info[j].native_buff);
3371 for (j = 0; j < k_stream_buff_info.num_buffs; j++) {
3372 msm_cpp_queue_buffer_info(cpp_dev,
3374 &k_stream_buff_info.buffer_info[j]);
3379 kfree(k_stream_buff_info.buffer_info);
3380 kfree(u_stream_buff_info);
3384 case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO: {
3386 struct msm_cpp_buff_queue_info_t *buff_queue_info;
3388 CPP_DBG("VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO\n");
3389 if (ioctl_ptr->len != sizeof(uint32_t)) {
3390 mutex_unlock(&cpp_dev->mutex);
3394 rc = msm_cpp_copy_from_ioctl_ptr(&identity, ioctl_ptr);
3396 ERR_COPY_FROM_USER();
3397 mutex_unlock(&cpp_dev->mutex);
3401 buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
3402 ((identity >> 16) & 0xFFFF), (identity & 0xFFFF));
3403 if (buff_queue_info == NULL) {
3404 pr_err("error finding buffer queue entry for identity:%d\n",
3406 mutex_unlock(&cpp_dev->mutex);
3410 msm_cpp_dequeue_buff_info_list(cpp_dev, buff_queue_info);
3411 rc = msm_cpp_free_buff_queue_entry(cpp_dev,
3412 buff_queue_info->session_id,
3413 buff_queue_info->stream_id);
/* Last stream going away: drop bandwidth vote and idle the device. */
3414 if (cpp_dev->stream_cnt > 0) {
3415 cpp_dev->stream_cnt--;
3416 pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
3417 if (cpp_dev->stream_cnt == 0) {
3418 rc = msm_cpp_update_bandwidth_setting(cpp_dev,
3421 pr_err("Bandwidth Reset Failed!\n");
3422 cpp_dev->state = CPP_STATE_IDLE;
3423 msm_cpp_clear_timer(cpp_dev);
3424 msm_cpp_clean_queue(cpp_dev);
3427 pr_err("error: stream count underflow %d\n",
3428 cpp_dev->stream_cnt);
3432 case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
3433 struct msm_device_queue *queue = &cpp_dev->eventData_q;
3434 struct msm_queue_cmd *event_qcmd;
3435 struct msm_cpp_frame_info_t *process_frame;
3437 CPP_DBG("VIDIOC_MSM_CPP_GET_EVENTPAYLOAD\n");
3438 event_qcmd = msm_dequeue(queue, list_eventdata, POP_FRONT);
3440 pr_err("no queue cmd available");
3441 mutex_unlock(&cpp_dev->mutex);
3444 process_frame = event_qcmd->command;
3445 CPP_DBG("fid %d\n", process_frame->frame_id);
3446 if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
3448 sizeof(struct msm_cpp_frame_info_t))) {
3449 mutex_unlock(&cpp_dev->mutex);
3450 kfree(process_frame->cpp_cmd_msg);
3451 kfree(process_frame);
/* Frame ownership ends here; free the message and frame struct. */
3456 kfree(process_frame->cpp_cmd_msg);
3457 kfree(process_frame);
3461 case VIDIOC_MSM_CPP_SET_CLOCK: {
3462 uint32_t msm_cpp_core_clk_idx;
3463 struct msm_cpp_clock_settings_t clock_settings;
3464 unsigned long clock_rate = 0;
3466 CPP_DBG("VIDIOC_MSM_CPP_SET_CLOCK\n");
3467 if (ioctl_ptr->len == 0) {
3468 pr_err("ioctl_ptr->len is 0\n");
3469 mutex_unlock(&cpp_dev->mutex);
3473 if (ioctl_ptr->ioctl_ptr == NULL) {
3474 pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
3475 mutex_unlock(&cpp_dev->mutex);
3479 if (ioctl_ptr->len != sizeof(struct msm_cpp_clock_settings_t)) {
3480 pr_err("Not valid ioctl_ptr->len\n");
3481 mutex_unlock(&cpp_dev->mutex);
3485 rc = msm_cpp_copy_from_ioctl_ptr(&clock_settings, ioctl_ptr);
3487 ERR_COPY_FROM_USER();
3488 mutex_unlock(&cpp_dev->mutex);
3492 if (clock_settings.clock_rate > 0) {
3493 msm_cpp_core_clk_idx = msm_cpp_get_clock_index(cpp_dev,
/*
 * FIXME(review): msm_cpp_core_clk_idx is declared uint32_t
 * above, so this signed comparison is always false and a
 * failed clock-index lookup is never caught.  The variable
 * should be signed (or the error checked before assignment).
 */
3495 if (msm_cpp_core_clk_idx < 0) {
3496 pr_err(" Fail to get clock index\n");
3497 mutex_unlock(&cpp_dev->mutex);
3500 rc = msm_cpp_update_bandwidth_setting(cpp_dev,
3502 clock_settings.inst);
3504 pr_err("Bandwidth Set Failed!\n");
3505 rc = msm_cpp_update_bandwidth_setting(cpp_dev,
3507 mutex_unlock(&cpp_dev->mutex);
/* Route through the iPeak helper only when a CX iPeak handle exists. */
3510 if (cpp_dev->cpp_cx_ipeak) {
3511 clock_rate = cpp_cx_ipeak_update(cpp_dev,
3512 clock_settings.clock_rate,
3513 msm_cpp_core_clk_idx);
3515 clock_rate = msm_cpp_set_core_clk(cpp_dev,
3516 clock_settings.clock_rate,
3517 msm_cpp_core_clk_idx);
3520 pr_err("Fail to set core clk\n");
3521 mutex_unlock(&cpp_dev->mutex);
3524 if (clock_rate != clock_settings.clock_rate)
3525 pr_err("clock rate differ from settings\n");
3526 msm_isp_util_update_clk_rate(clock_settings.clock_rate);
3530 case MSM_SD_NOTIFY_FREEZE:
3532 case MSM_SD_UNNOTIFY_FREEZE:
3534 case MSM_SD_SHUTDOWN:
3535 CPP_DBG("MSM_SD_SHUTDOWN\n");
/* Drop the mutex: cpp_close_node() takes it internally. */
3536 mutex_unlock(&cpp_dev->mutex);
3537 pr_warn("shutdown cpp node. open cnt:%d\n",
3538 cpp_dev->cpp_open_cnt);
3540 if (atomic_read(&cpp_timer.used))
3541 pr_debug("Timer state not cleared\n");
3543 while (cpp_dev->cpp_open_cnt != 0)
3544 cpp_close_node(sd, NULL);
3545 mutex_lock(&cpp_dev->mutex);
3548 case VIDIOC_MSM_CPP_QUEUE_BUF: {
3549 struct msm_pproc_queue_buf_info queue_buf_info;
3551 CPP_DBG("VIDIOC_MSM_CPP_QUEUE_BUF\n");
3553 if (ioctl_ptr->len != sizeof(struct msm_pproc_queue_buf_info)) {
3554 pr_err("%s: Not valid ioctl_ptr->len\n", __func__);
3555 mutex_unlock(&cpp_dev->mutex);
3558 rc = msm_cpp_copy_from_ioctl_ptr(&queue_buf_info, ioctl_ptr);
3560 ERR_COPY_FROM_USER();
/* Dirty buffers are returned; clean ones are marked done. */
3564 if (queue_buf_info.is_buf_dirty) {
3565 rc = msm_cpp_buffer_ops(cpp_dev,
3566 VIDIOC_MSM_BUF_MNGR_PUT_BUF,
3567 0x0, &queue_buf_info.buff_mgr_info);
3569 rc = msm_cpp_buffer_ops(cpp_dev,
3570 VIDIOC_MSM_BUF_MNGR_BUF_DONE,
3571 0x0, &queue_buf_info.buff_mgr_info);
3574 pr_err("error in buf done\n");
3580 case VIDIOC_MSM_CPP_POP_STREAM_BUFFER: {
3581 struct msm_buf_mngr_info buff_mgr_info;
3582 struct msm_cpp_frame_info_t frame_info;
3583 uint32_t ioctl_cmd, idx;
3584 if (ioctl_ptr->ioctl_ptr == NULL ||
3586 sizeof(struct msm_cpp_frame_info_t))) {
3591 rc = msm_cpp_copy_from_ioctl_ptr(&frame_info, ioctl_ptr);
3593 ERR_COPY_FROM_USER();
3597 memset(&buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
3598 buff_mgr_info.session_id =
3599 ((frame_info.identity >> 16) & 0xFFFF);
3600 buff_mgr_info.stream_id = (frame_info.identity & 0xFFFF);
3601 buff_mgr_info.type =
3602 MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
3603 if (IS_DEFAULT_OUTPUT_BUF_INDEX(
3604 frame_info.output_buffer_info[0].index)) {
3605 ioctl_cmd = VIDIOC_MSM_BUF_MNGR_GET_BUF;
3608 ioctl_cmd = VIDIOC_MSM_BUF_MNGR_IOCTL_CMD;
3609 idx = MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX;
3611 rc = msm_cpp_buffer_ops(cpp_dev, ioctl_cmd, idx,
3615 pr_err_ratelimited("POP: get_buf err rc:%d, index %d\n",
3616 rc, frame_info.output_buffer_info[0].index);
/* Immediately mark the popped buffer done with the frame id. */
3619 buff_mgr_info.frame_id = frame_info.frame_id;
3620 rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_BUF_DONE,
3621 0x0, &buff_mgr_info);
3623 pr_err("error in buf done\n");
3629 pr_err_ratelimited("invalid value: cmd=0x%x\n", cmd);
3631 case VIDIOC_MSM_CPP_IOMMU_ATTACH: {
/* Attach is only legal from the detached state. */
3632 if (cpp_dev->iommu_state == CPP_IOMMU_STATE_DETACHED) {
3633 int32_t stall_disable;
3634 struct msm_camera_smmu_attach_type cpp_attach_info;
3636 if (ioctl_ptr->len !=
3637 sizeof(struct msm_camera_smmu_attach_type)) {
3642 memset(&cpp_attach_info, 0, sizeof(cpp_attach_info));
3643 rc = msm_cpp_copy_from_ioctl_ptr(&cpp_attach_info,
3646 pr_err("CPP_IOMMU_ATTACH copy from user fail");
3650 cpp_dev->security_mode = cpp_attach_info.attach;
3652 /* disable smmu stall on fault */
3653 cam_smmu_set_attr(cpp_dev->iommu_hdl,
3654 DOMAIN_ATTR_CB_STALL_DISABLE, &stall_disable);
/* Secure vs non-secure attach use different SMMU ops. */
3655 if (cpp_dev->security_mode == SECURE_MODE) {
3656 rc = cam_smmu_ops(cpp_dev->iommu_hdl,
3657 CAM_SMMU_ATTACH_SEC_CPP);
3659 rc = cam_smmu_ops(cpp_dev->iommu_hdl,
3663 pr_err("%s:%diommu_attach_device failed\n",
3664 __func__, __LINE__);
3668 cpp_dev->iommu_state = CPP_IOMMU_STATE_ATTACHED;
3670 pr_err("%s:%d IOMMMU attach triggered in invalid state\n",
3671 __func__, __LINE__);
3676 case VIDIOC_MSM_CPP_IOMMU_DETACH: {
/* Detach requires attached state and no active streams. */
3677 if ((cpp_dev->iommu_state == CPP_IOMMU_STATE_ATTACHED) &&
3678 (cpp_dev->stream_cnt == 0)) {
3679 struct msm_camera_smmu_attach_type cpp_attach_info;
3681 if (ioctl_ptr->len !=
3682 sizeof(struct msm_camera_smmu_attach_type)) {
3687 memset(&cpp_attach_info, 0, sizeof(cpp_attach_info));
3688 rc = msm_cpp_copy_from_ioctl_ptr(&cpp_attach_info,
3691 pr_err("CPP_IOMMU_DETTACH copy from user fail");
3695 cpp_dev->security_mode = cpp_attach_info.attach;
3697 if (cpp_dev->security_mode == SECURE_MODE)
3698 rc = cam_smmu_ops(cpp_dev->iommu_hdl,
3699 CAM_SMMU_DETACH_SEC_CPP);
3701 rc = cam_smmu_ops(cpp_dev->iommu_hdl,
3704 pr_err("%s:%diommu detach failed\n", __func__,
3709 cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
3711 pr_err("%s:%d IOMMMU attach triggered in invalid state\n",
3712 __func__, __LINE__);
3718 mutex_unlock(&cpp_dev->mutex);
/*
 * msm_cpp_subscribe_event - v4l2 subdev core op: subscribe the caller's
 * file handle to CPP v4l2 events, with a per-handle queue depth of
 * MAX_CPP_V4l2_EVENTS. Returns 0 or a negative errno from the v4l2 core.
 */
3723 int msm_cpp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
3724 struct v4l2_event_subscription *sub)
3726 CPP_DBG("Called\n");
3727 return v4l2_event_subscribe(fh, sub, MAX_CPP_V4l2_EVENTS, NULL);
/*
 * msm_cpp_unsubscribe_event - v4l2 subdev core op: drop an event
 * subscription previously made via msm_cpp_subscribe_event().
 */
3730 int msm_cpp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
3731 struct v4l2_event_subscription *sub)
3733 CPP_DBG("Called\n");
3734 return v4l2_event_unsubscribe(fh, sub);
/*
 * Subdev core ops table: private ioctl entry point plus the event
 * (un)subscribe hooks defined above.
 */
3737 static struct v4l2_subdev_core_ops msm_cpp_subdev_core_ops = {
3738 .ioctl = msm_cpp_subdev_ioctl,
3739 .subscribe_event = msm_cpp_subscribe_event,
3740 .unsubscribe_event = msm_cpp_unsubscribe_event,
/* Top-level subdev ops; only the core ops group is populated. */
3743 static const struct v4l2_subdev_ops msm_cpp_subdev_ops = {
3744 .core = &msm_cpp_subdev_core_ops,
/*
 * msm_cpp_subdev_do_ioctl - dispatch callback invoked by video_usercopy().
 * @file: video device file; @cmd: ioctl number; @arg: kernel-space copy of
 * the user argument (video_usercopy has already done the copy-in/out).
 *
 * Handles the generic v4l2 event ioctls inline, answers
 * VIDIOC_MSM_CPP_GET_INST_INFO directly, and forwards everything else to
 * the subdev core ioctl (msm_cpp_subdev_ioctl).
 */
3747 static long msm_cpp_subdev_do_ioctl(
3748 struct file *file, unsigned int cmd, void *arg)
3750 struct video_device *vdev;
3751 struct v4l2_subdev *sd;
3752 struct v4l2_fh *vfh = NULL;
/* Reject NULL inputs before any dereference. */
3754 if ((arg == NULL) || (file == NULL)) {
3755 pr_err("Invalid input parameters arg %pK, file %pK\n",
3759 vdev = video_devdata(file);
3760 sd = vdev_to_v4l2_subdev(vdev);
3763 pr_err("Invalid input parameter sd %pK\n", sd);
3766 vfh = file->private_data;
3769 case VIDIOC_DQEVENT:
/* Only meaningful if this subdev advertises events. */
3770 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
3771 return -ENOIOCTLCMD;
3773 return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
3775 case VIDIOC_SUBSCRIBE_EVENT:
3776 return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
3778 case VIDIOC_UNSUBSCRIBE_EVENT:
3779 return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
3781 case VIDIOC_MSM_CPP_GET_INST_INFO: {
3783 struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
3784 struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
3785 struct msm_cpp_frame_info_t inst_info;
3787 memset(&inst_info, 0, sizeof(struct msm_cpp_frame_info_t));
/* Find the instance slot owned by this file handle. */
3788 for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
3789 if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
3790 inst_info.inst_id = i;
3795 (void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
3796 sizeof(struct msm_cpp_frame_info_t))) {
/* Everything else goes to the subdev core ioctl handler. */
3802 return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
/*
 * fops unlocked_ioctl entry: video_usercopy() copies the user argument
 * in/out and calls msm_cpp_subdev_do_ioctl with a kernel-space pointer.
 */
3808 static long msm_cpp_subdev_fops_ioctl(struct file *file, unsigned int cmd,
3811 return video_usercopy(file, cmd, arg, msm_cpp_subdev_do_ioctl);
3815 #ifdef CONFIG_COMPAT
/*
 * get_64bit_cpp_frame_from_compat - build a native 64-bit frame-info
 * struct from a 32-bit (compat) user-space msm_cpp_frame_info32_t.
 * @kp_ioctl: kernel copy of the ioctl wrapper; its ioctl_ptr points at
 * the user's 32-bit frame struct.
 *
 * Returns a kzalloc'd msm_cpp_frame_info_t whose cpp_cmd_msg is a
 * separately kzalloc'd copy of the user's command payload; the caller
 * owns both allocations and must free them. Returns NULL on any
 * copy/validation/allocation failure.
 */
3816 static struct msm_cpp_frame_info_t *get_64bit_cpp_frame_from_compat(
3817 struct msm_camera_v4l2_ioctl_t *kp_ioctl)
3819 struct msm_cpp_frame_info32_t *new_frame32 = NULL;
3820 struct msm_cpp_frame_info_t *new_frame = NULL;
3821 uint32_t *cpp_frame_msg;
3822 void *cpp_cmd_msg_64bit;
/* Temporary kernel buffer for the compat layout. */
3825 new_frame32 = kzalloc(sizeof(struct msm_cpp_frame_info32_t),
3828 pr_err("Insufficient memory\n");
3831 new_frame = kzalloc(sizeof(struct msm_cpp_frame_info_t), GFP_KERNEL);
3833 pr_err("Insufficient memory\n");
3837 rc = (copy_from_user(new_frame32, (void __user *)kp_ioctl->ioctl_ptr,
3838 sizeof(struct msm_cpp_frame_info32_t)) ? -EFAULT : 0);
3840 ERR_COPY_FROM_USER();
/* Field-by-field widening copy from the 32-bit layout. */
3844 new_frame->frame_id = new_frame32->frame_id;
3845 new_frame->inst_id = new_frame32->inst_id;
3846 new_frame->client_id = new_frame32->client_id;
3847 new_frame->frame_type = new_frame32->frame_type;
3848 new_frame->num_strips = new_frame32->num_strips;
3850 new_frame->src_fd = new_frame32->src_fd;
3851 new_frame->dst_fd = new_frame32->dst_fd;
/* compat_timeval (32-bit) widened to native timeval. */
3853 new_frame->timestamp.tv_sec =
3854 (unsigned long)new_frame32->timestamp.tv_sec;
3855 new_frame->timestamp.tv_usec =
3856 (unsigned long)new_frame32->timestamp.tv_usec;
3858 new_frame->in_time.tv_sec =
3859 (unsigned long)new_frame32->in_time.tv_sec;
3860 new_frame->in_time.tv_usec =
3861 (unsigned long)new_frame32->in_time.tv_usec;
3863 new_frame->out_time.tv_sec =
3864 (unsigned long)new_frame32->out_time.tv_sec;
3865 new_frame->out_time.tv_usec =
3866 (unsigned long)new_frame32->out_time.tv_usec;
3868 new_frame->msg_len = new_frame32->msg_len;
3869 new_frame->identity = new_frame32->identity;
3870 new_frame->input_buffer_info = new_frame32->input_buffer_info;
3871 new_frame->output_buffer_info[0] =
3872 new_frame32->output_buffer_info[0];
3873 new_frame->output_buffer_info[1] =
3874 new_frame32->output_buffer_info[1];
3875 new_frame->output_buffer_info[2] =
3876 new_frame32->output_buffer_info[2];
3877 new_frame->output_buffer_info[3] =
3878 new_frame32->output_buffer_info[3];
3879 new_frame->output_buffer_info[4] =
3880 new_frame32->output_buffer_info[4];
3881 new_frame->output_buffer_info[5] =
3882 new_frame32->output_buffer_info[5];
3883 new_frame->output_buffer_info[6] =
3884 new_frame32->output_buffer_info[6];
3885 new_frame->output_buffer_info[7] =
3886 new_frame32->output_buffer_info[7];
3887 new_frame->duplicate_buffer_info =
3888 new_frame32->duplicate_buffer_info;
3889 new_frame->tnr_scratch_buffer_info[0] =
3890 new_frame32->tnr_scratch_buffer_info[0];
3891 new_frame->tnr_scratch_buffer_info[1] =
3892 new_frame32->tnr_scratch_buffer_info[1];
3893 new_frame->duplicate_output = new_frame32->duplicate_output;
3894 new_frame->we_disable = new_frame32->we_disable;
3895 new_frame->duplicate_identity = new_frame32->duplicate_identity;
3896 new_frame->feature_mask = new_frame32->feature_mask;
3897 new_frame->partial_frame_indicator =
3898 new_frame32->partial_frame_indicator;
3899 new_frame->first_payload = new_frame32->first_payload;
3900 new_frame->last_payload = new_frame32->last_payload;
3901 new_frame->first_stripe_index = new_frame32->first_stripe_index;
3902 new_frame->last_stripe_index = new_frame32->last_stripe_index;
3903 new_frame->stripe_info_offset =
3904 new_frame32->stripe_info_offset;
3905 new_frame->stripe_info = new_frame32->stripe_info;
3906 new_frame->batch_info.batch_mode =
3907 new_frame32->batch_info.batch_mode;
3908 new_frame->batch_info.batch_size =
3909 new_frame32->batch_info.batch_size;
3910 new_frame->batch_info.cont_idx =
3911 new_frame32->batch_info.cont_idx;
3912 for (i = 0; i < MAX_PLANES; i++)
3913 new_frame->batch_info.intra_plane_offset[i] =
3914 new_frame32->batch_info.intra_plane_offset[i];
3915 new_frame->batch_info.pick_preview_idx =
3916 new_frame32->batch_info.pick_preview_idx;
3918 /* Convert the 32 bit pointer to 64 bit pointer */
3919 new_frame->cookie = compat_ptr(new_frame32->cookie);
3920 cpp_cmd_msg_64bit = compat_ptr(new_frame32->cpp_cmd_msg);
/* Bound msg_len before the multiply below so the kzalloc size
 * cannot overflow and a zero-length payload is rejected. */
3921 if ((new_frame->msg_len == 0) ||
3922 (new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
3923 pr_err("%s:%d: Invalid frame len:%d\n", __func__,
3924 __LINE__, new_frame->msg_len);
3928 cpp_frame_msg = kzalloc(sizeof(uint32_t)*new_frame->msg_len,
3930 if (!cpp_frame_msg) {
3931 pr_err("Insufficient memory\n");
/* Pull the actual command payload from user space. */
3935 rc = (copy_from_user(cpp_frame_msg,
3936 (void __user *)cpp_cmd_msg_64bit,
3937 sizeof(uint32_t)*new_frame->msg_len) ? -EFAULT : 0);
3939 ERR_COPY_FROM_USER();
3942 new_frame->cpp_cmd_msg = cpp_frame_msg;
3948 kfree(cpp_frame_msg);
/*
 * get_compat_frame_from_64bit - narrow a native frame-info struct back
 * into the 32-bit (compat) layout for return to a 32-bit user process.
 * @frame: source native struct; @k32_frame: destination compat struct
 * (kernel memory, caller copies it to user space afterwards).
 *
 * NOTE: timeval fields are truncated to 32 bits by design of the compat
 * ABI; cpp_cmd_msg is intentionally not copied back.
 */
3957 static void get_compat_frame_from_64bit(struct msm_cpp_frame_info_t *frame,
3958 struct msm_cpp_frame_info32_t *k32_frame)
3962 k32_frame->frame_id = frame->frame_id;
3963 k32_frame->inst_id = frame->inst_id;
3964 k32_frame->client_id = frame->client_id;
3965 k32_frame->frame_type = frame->frame_type;
3966 k32_frame->num_strips = frame->num_strips;
3968 k32_frame->src_fd = frame->src_fd;
3969 k32_frame->dst_fd = frame->dst_fd;
/* Explicit narrowing casts: native timeval -> compat 32-bit fields. */
3971 k32_frame->timestamp.tv_sec = (uint32_t)frame->timestamp.tv_sec;
3972 k32_frame->timestamp.tv_usec = (uint32_t)frame->timestamp.tv_usec;
3974 k32_frame->in_time.tv_sec = (uint32_t)frame->in_time.tv_sec;
3975 k32_frame->in_time.tv_usec = (uint32_t)frame->in_time.tv_usec;
3977 k32_frame->out_time.tv_sec = (uint32_t)frame->out_time.tv_sec;
3978 k32_frame->out_time.tv_usec = (uint32_t)frame->out_time.tv_usec;
3980 k32_frame->msg_len = frame->msg_len;
3981 k32_frame->identity = frame->identity;
3982 k32_frame->input_buffer_info = frame->input_buffer_info;
3983 k32_frame->output_buffer_info[0] = frame->output_buffer_info[0];
3984 k32_frame->output_buffer_info[1] = frame->output_buffer_info[1];
3985 k32_frame->output_buffer_info[2] = frame->output_buffer_info[2];
3986 k32_frame->output_buffer_info[3] = frame->output_buffer_info[3];
3987 k32_frame->output_buffer_info[4] = frame->output_buffer_info[4];
3988 k32_frame->output_buffer_info[5] = frame->output_buffer_info[5];
3989 k32_frame->output_buffer_info[6] = frame->output_buffer_info[6];
3990 k32_frame->output_buffer_info[7] = frame->output_buffer_info[7];
3991 k32_frame->duplicate_buffer_info = frame->duplicate_buffer_info;
3992 k32_frame->duplicate_output = frame->duplicate_output;
3993 k32_frame->we_disable = frame->we_disable;
3994 k32_frame->duplicate_identity = frame->duplicate_identity;
3995 k32_frame->feature_mask = frame->feature_mask;
/* 64-bit kernel pointer squeezed back into the compat cookie. */
3996 k32_frame->cookie = ptr_to_compat(frame->cookie);
3997 k32_frame->partial_frame_indicator = frame->partial_frame_indicator;
3998 k32_frame->first_payload = frame->first_payload;
3999 k32_frame->last_payload = frame->last_payload;
4000 k32_frame->first_stripe_index = frame->first_stripe_index;
4001 k32_frame->last_stripe_index = frame->last_stripe_index;
4002 k32_frame->stripe_info_offset = frame->stripe_info_offset;
4003 k32_frame->stripe_info = frame->stripe_info;
4004 k32_frame->batch_info.batch_mode = frame->batch_info.batch_mode;
4005 k32_frame->batch_info.batch_size = frame->batch_info.batch_size;
4006 k32_frame->batch_info.cont_idx = frame->batch_info.cont_idx;
4007 for (i = 0; i < MAX_PLANES; i++)
4008 k32_frame->batch_info.intra_plane_offset[i] =
4009 frame->batch_info.intra_plane_offset[i];
4010 k32_frame->batch_info.pick_preview_idx =
4011 frame->batch_info.pick_preview_idx;
/*
 * msm_cpp_subdev_fops_compat_ioctl - compat_ioctl32 entry point.
 *
 * Translates each 32-bit CPP ioctl into its native 64-bit equivalent:
 * copies the 32-bit msm_camera_v4l2_ioctl32_t wrapper in, converts the
 * payload (compat_ptr, get_user field copies, struct widening), rewrites
 * cmd to the 64-bit ioctl number, dispatches via v4l2_subdev_call, and
 * copies the wrapper back out unless the case set is_copytouser_req to
 * false. All conversion work happens under cpp_dev->mutex.
 */
4014 static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
4015 unsigned int cmd, unsigned long arg)
4017 struct video_device *vdev = video_devdata(file);
4018 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
4019 struct cpp_device *cpp_dev = NULL;
4022 struct msm_camera_v4l2_ioctl_t kp_ioctl;
4023 struct msm_camera_v4l2_ioctl32_t up32_ioctl;
4024 struct msm_cpp_clock_settings_t clock_settings;
4025 struct msm_pproc_queue_buf_info k_queue_buf;
4026 struct msm_cpp_stream_buff_info_t k_cpp_buff_info;
4027 struct msm_cpp_frame_info32_t k32_frame_info;
4028 struct msm_cpp_frame_info_t k64_frame_info;
4029 struct msm_camera_smmu_attach_type kb_cpp_smmu_attach_info;
4030 uint32_t identity_k = 0;
/* When false, the converted payload lives in a kernel local and the
 * 32-bit wrapper must NOT be copied back to user space. */
4031 bool is_copytouser_req = true;
4032 void __user *up = (void __user *)arg;
4035 pr_err("%s: Subdevice is NULL\n", __func__);
4038 cpp_dev = v4l2_get_subdevdata(sd);
4039 if (!vdev || !cpp_dev) {
4040 pr_err("Invalid vdev %pK or cpp_dev %pK structures!",
4044 mutex_lock(&cpp_dev->mutex);
4046 * copy the user space 32 bit pointer to kernel space 32 bit compat
4049 if (copy_from_user(&up32_ioctl, (void __user *)up,
4050 sizeof(up32_ioctl))) {
4051 mutex_unlock(&cpp_dev->mutex);
4055 /* copy the data from 32 bit compat to kernel space 64 bit pointer */
4056 kp_ioctl.id = up32_ioctl.id;
4057 kp_ioctl.len = up32_ioctl.len;
4058 kp_ioctl.trans_code = up32_ioctl.trans_code;
4059 /* Convert the 32 bit pointer to 64 bit pointer */
4060 kp_ioctl.ioctl_ptr = compat_ptr(up32_ioctl.ioctl_ptr);
4061 if (!kp_ioctl.ioctl_ptr) {
4062 pr_err("%s: Invalid ioctl pointer\n", __func__);
4063 mutex_unlock(&cpp_dev->mutex);
4068 * Convert 32 bit IOCTL ID's to 64 bit IOCTL ID's
4069 * except VIDIOC_MSM_CPP_CFG32, which needs special
4073 case VIDIOC_MSM_CPP_CFG32:
4075 struct msm_cpp_frame_info32_t k32_frame_info;
4076 struct msm_cpp_frame_info_t *cpp_frame = NULL;
4079 if (copy_from_user(&k32_frame_info,
4080 (void __user *)kp_ioctl.ioctl_ptr,
4081 sizeof(k32_frame_info))) {
4082 mutex_unlock(&cpp_dev->mutex);
4085 /* Get the cpp frame pointer */
4086 cpp_frame = get_64bit_cpp_frame_from_compat(&kp_ioctl);
4088 /* Configure the cpp frame */
4090 rc = msm_cpp_cfg_frame(cpp_dev, cpp_frame);
4091 /* Cpp_frame can be free'd by cfg_frame in error */
/* On success, reflect the buffer indices chosen by cfg back
 * into the compat struct returned to user space. */
4093 k32_frame_info.output_buffer_info[0] =
4094 cpp_frame->output_buffer_info[0];
4095 k32_frame_info.output_buffer_info[1] =
4096 cpp_frame->output_buffer_info[1];
4099 pr_err("%s: Error getting frame\n", __func__);
4100 mutex_unlock(&cpp_dev->mutex);
4104 kp_ioctl.trans_code = rc;
4106 /* Convert the 32 bit pointer to 64 bit pointer */
4107 status = compat_ptr(k32_frame_info.status);
4109 if (copy_to_user((void __user *)status, &rc,
4111 pr_err("error cannot copy error\n");
4113 if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
4115 sizeof(k32_frame_info))) {
4116 mutex_unlock(&cpp_dev->mutex);
4120 cmd = VIDIOC_MSM_CPP_CFG;
4123 case VIDIOC_MSM_CPP_GET_HW_INFO32:
4125 struct cpp_hw_info_32_t u32_cpp_hw_info;
/* Answered inline: fill the 32-bit hw-info and copy it out. */
4128 u32_cpp_hw_info.cpp_hw_version =
4129 cpp_dev->hw_info.cpp_hw_version;
4130 u32_cpp_hw_info.cpp_hw_caps = cpp_dev->hw_info.cpp_hw_caps;
4131 memset(&u32_cpp_hw_info.freq_tbl, 0x00,
4132 sizeof(u32_cpp_hw_info.freq_tbl));
4133 for (i = 0; i < cpp_dev->hw_info.freq_tbl_count; i++)
4134 u32_cpp_hw_info.freq_tbl[i] =
4135 cpp_dev->hw_info.freq_tbl[i];
4137 u32_cpp_hw_info.freq_tbl_count =
4138 cpp_dev->hw_info.freq_tbl_count;
4139 if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
4140 &u32_cpp_hw_info, sizeof(struct cpp_hw_info_32_t))) {
4141 mutex_unlock(&cpp_dev->mutex);
4145 cmd = VIDIOC_MSM_CPP_GET_HW_INFO;
4148 case VIDIOC_MSM_CPP_LOAD_FIRMWARE32:
4149 cmd = VIDIOC_MSM_CPP_LOAD_FIRMWARE;
4151 case VIDIOC_MSM_CPP_GET_INST_INFO32:
4153 struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
4154 struct msm_cpp_frame_info32_t inst_info;
4155 struct v4l2_fh *vfh = NULL;
4158 vfh = file->private_data;
4159 memset(&inst_info, 0, sizeof(struct msm_cpp_frame_info32_t));
4160 for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
4161 if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
4162 inst_info.inst_id = i;
4166 if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
4167 &inst_info, sizeof(struct msm_cpp_frame_info32_t))) {
4168 mutex_unlock(&cpp_dev->mutex);
4171 cmd = VIDIOC_MSM_CPP_GET_INST_INFO;
4174 case VIDIOC_MSM_CPP_FLUSH_QUEUE32:
4175 cmd = VIDIOC_MSM_CPP_FLUSH_QUEUE;
4177 case VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO32:
4178 case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO32:
4179 case VIDIOC_MSM_CPP_DELETE_STREAM_BUFF32:
4182 struct msm_cpp_stream_buff_info32_t *u32_cpp_buff_info =
4183 (struct msm_cpp_stream_buff_info32_t *)kp_ioctl.ioctl_ptr;
/* NOTE(review): get_user() return values are ignored in these
 * conversion cases; on fault the kernel locals stay stale.
 * Consider checking them — verify against upstream fixes. */
4185 get_user(k_cpp_buff_info.identity,
4186 &u32_cpp_buff_info->identity);
4187 get_user(k_cpp_buff_info.num_buffs,
4188 &u32_cpp_buff_info->num_buffs);
4189 get_user(p, &u32_cpp_buff_info->buffer_info);
4190 k_cpp_buff_info.buffer_info = compat_ptr(p);
4192 kp_ioctl.ioctl_ptr = (void *)&k_cpp_buff_info;
4193 if (is_compat_task()) {
4194 if (kp_ioctl.len != sizeof(
4195 struct msm_cpp_stream_buff_info32_t)) {
4196 mutex_unlock(&cpp_dev->mutex);
4200 sizeof(struct msm_cpp_stream_buff_info_t);
4203 is_copytouser_req = false;
4204 if (cmd == VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO32)
4205 cmd = VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO;
4206 else if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF32)
4207 cmd = VIDIOC_MSM_CPP_DELETE_STREAM_BUFF;
4209 cmd = VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO;
4212 case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO32: {
4213 uint32_t *identity_u = (uint32_t *)kp_ioctl.ioctl_ptr;
4215 get_user(identity_k, identity_u);
4216 kp_ioctl.ioctl_ptr = (void *)&identity_k;
4217 kp_ioctl.len = sizeof(uint32_t);
4218 is_copytouser_req = false;
4219 cmd = VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO;
4222 case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD32:
4224 struct msm_device_queue *queue = &cpp_dev->eventData_q;
4225 struct msm_queue_cmd *event_qcmd;
4226 struct msm_cpp_frame_info_t *process_frame;
4227 struct msm_cpp_frame_info32_t k32_process_frame;
4229 CPP_DBG("VIDIOC_MSM_CPP_GET_EVENTPAYLOAD\n");
/* Pop the completed frame and return it in compat layout. */
4230 event_qcmd = msm_dequeue(queue, list_eventdata, POP_FRONT);
4232 pr_err("no queue cmd available");
4233 mutex_unlock(&cpp_dev->mutex);
4236 process_frame = event_qcmd->command;
4238 memset(&k32_process_frame, 0, sizeof(k32_process_frame));
4239 get_compat_frame_from_64bit(process_frame, &k32_process_frame);
4241 CPP_DBG("fid %d\n", process_frame->frame_id);
4242 if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
4244 sizeof(struct msm_cpp_frame_info32_t))) {
4245 kfree(process_frame->cpp_cmd_msg);
4246 kfree(process_frame);
4248 mutex_unlock(&cpp_dev->mutex);
/* Frame payload ownership ends here (allocated at cfg time). */
4252 kfree(process_frame->cpp_cmd_msg);
4253 kfree(process_frame);
4255 cmd = VIDIOC_MSM_CPP_GET_EVENTPAYLOAD;
4258 case VIDIOC_MSM_CPP_SET_CLOCK32:
4260 struct msm_cpp_clock_settings32_t *clock_settings32 =
4261 (struct msm_cpp_clock_settings32_t *)kp_ioctl.ioctl_ptr;
4262 get_user(clock_settings.clock_rate,
4263 &clock_settings32->clock_rate);
4264 get_user(clock_settings.avg, &clock_settings32->avg);
4265 get_user(clock_settings.inst, &clock_settings32->inst);
4266 kp_ioctl.ioctl_ptr = (void *)&clock_settings;
4267 if (is_compat_task()) {
4268 if (kp_ioctl.len != sizeof(
4269 struct msm_cpp_clock_settings32_t)) {
4270 mutex_unlock(&cpp_dev->mutex);
4274 sizeof(struct msm_cpp_clock_settings_t);
4277 is_copytouser_req = false;
4278 cmd = VIDIOC_MSM_CPP_SET_CLOCK;
4281 case VIDIOC_MSM_CPP_QUEUE_BUF32:
4283 struct msm_pproc_queue_buf_info32_t *u32_queue_buf =
4284 (struct msm_pproc_queue_buf_info32_t *)kp_ioctl.ioctl_ptr;
4286 get_user(k_queue_buf.is_buf_dirty,
4287 &u32_queue_buf->is_buf_dirty);
4288 get_user(k_queue_buf.buff_mgr_info.session_id,
4289 &u32_queue_buf->buff_mgr_info.session_id);
4290 get_user(k_queue_buf.buff_mgr_info.stream_id,
4291 &u32_queue_buf->buff_mgr_info.stream_id);
4292 get_user(k_queue_buf.buff_mgr_info.frame_id,
4293 &u32_queue_buf->buff_mgr_info.frame_id);
4294 get_user(k_queue_buf.buff_mgr_info.index,
4295 &u32_queue_buf->buff_mgr_info.index);
4296 get_user(k_queue_buf.buff_mgr_info.timestamp.tv_sec,
4297 &u32_queue_buf->buff_mgr_info.timestamp.tv_sec);
4298 get_user(k_queue_buf.buff_mgr_info.timestamp.tv_usec,
4299 &u32_queue_buf->buff_mgr_info.timestamp.tv_usec);
4301 kp_ioctl.ioctl_ptr = (void *)&k_queue_buf;
4302 kp_ioctl.len = sizeof(struct msm_pproc_queue_buf_info);
4303 is_copytouser_req = false;
4304 cmd = VIDIOC_MSM_CPP_QUEUE_BUF;
4307 case VIDIOC_MSM_CPP_POP_STREAM_BUFFER32:
/* Length must match the compat layout before translation. */
4309 if (kp_ioctl.len != sizeof(struct msm_cpp_frame_info32_t)) {
4310 mutex_unlock(&cpp_dev->mutex);
4313 kp_ioctl.len = sizeof(struct msm_cpp_frame_info_t);
4316 if (copy_from_user(&k32_frame_info,
4317 (void __user *)kp_ioctl.ioctl_ptr,
4318 sizeof(k32_frame_info))) {
4319 mutex_unlock(&cpp_dev->mutex);
/* Only identity and frame_id are consumed by POP. */
4323 memset(&k64_frame_info, 0, sizeof(k64_frame_info));
4324 k64_frame_info.identity = k32_frame_info.identity;
4325 k64_frame_info.frame_id = k32_frame_info.frame_id;
4327 kp_ioctl.ioctl_ptr = (void *)&k64_frame_info;
4329 is_copytouser_req = false;
4330 cmd = VIDIOC_MSM_CPP_POP_STREAM_BUFFER;
4333 case VIDIOC_MSM_CPP_IOMMU_ATTACH32:
4334 case VIDIOC_MSM_CPP_IOMMU_DETACH32:
4336 if ((kp_ioctl.len != sizeof(struct msm_camera_smmu_attach_type))
4337 || (copy_from_user(&kb_cpp_smmu_attach_info,
4338 (void __user *)kp_ioctl.ioctl_ptr,
4339 sizeof(kb_cpp_smmu_attach_info)))) {
4340 mutex_unlock(&cpp_dev->mutex);
4344 kp_ioctl.ioctl_ptr = (void *)&kb_cpp_smmu_attach_info;
4345 is_copytouser_req = false;
4346 cmd = (cmd == VIDIOC_MSM_CPP_IOMMU_ATTACH32) ?
4347 VIDIOC_MSM_CPP_IOMMU_ATTACH :
4348 VIDIOC_MSM_CPP_IOMMU_DETACH;
4351 case MSM_SD_NOTIFY_FREEZE:
4353 case MSM_SD_UNNOTIFY_FREEZE:
4355 case MSM_SD_SHUTDOWN:
4356 cmd = MSM_SD_SHUTDOWN;
4359 pr_err_ratelimited("%s: unsupported compat type :%x LOAD %lu\n",
4360 __func__, cmd, VIDIOC_MSM_CPP_LOAD_FIRMWARE);
4361 mutex_unlock(&cpp_dev->mutex);
4365 mutex_unlock(&cpp_dev->mutex);
/* Dispatch the translated 64-bit command. */
4367 case VIDIOC_MSM_CPP_LOAD_FIRMWARE:
4368 case VIDIOC_MSM_CPP_FLUSH_QUEUE:
4369 case VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO:
4370 case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO:
4371 case VIDIOC_MSM_CPP_DELETE_STREAM_BUFF:
4372 case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO:
4373 case VIDIOC_MSM_CPP_SET_CLOCK:
4374 case VIDIOC_MSM_CPP_QUEUE_BUF:
4375 case VIDIOC_MSM_CPP_POP_STREAM_BUFFER:
4376 case VIDIOC_MSM_CPP_IOMMU_ATTACH:
4377 case VIDIOC_MSM_CPP_IOMMU_DETACH:
4378 case MSM_SD_SHUTDOWN:
4379 rc = v4l2_subdev_call(sd, core, ioctl, cmd, &kp_ioctl);
4381 case VIDIOC_MSM_CPP_GET_HW_INFO:
4382 case VIDIOC_MSM_CPP_CFG:
4383 case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD:
4384 case VIDIOC_MSM_CPP_GET_INST_INFO:
4386 case MSM_SD_NOTIFY_FREEZE:
4388 case MSM_SD_UNNOTIFY_FREEZE:
4391 pr_err_ratelimited("%s: unsupported compat type :%d\n",
/* Mirror the (possibly updated) wrapper back to user space. */
4396 if (is_copytouser_req) {
4397 up32_ioctl.id = kp_ioctl.id;
4398 up32_ioctl.len = kp_ioctl.len;
4399 up32_ioctl.trans_code = kp_ioctl.trans_code;
4400 up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
4402 if (copy_to_user((void __user *)up, &up32_ioctl,
4403 sizeof(up32_ioctl)))
/*
 * File operations for the CPP subdev node; compat_ioctl32 is only wired
 * up on CONFIG_COMPAT kernels. Re-populated in cpp_probe() after
 * msm_cam_copy_v4l2_subdev_fops() overwrites the defaults.
 */
4411 struct v4l2_file_operations msm_cpp_v4l2_subdev_fops = {
4412 .unlocked_ioctl = msm_cpp_subdev_fops_ioctl,
4413 #ifdef CONFIG_COMPAT
4414 .compat_ioctl32 = msm_cpp_subdev_fops_compat_ioctl,
4417 static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
4421 uint32_t msm_cpp_reg_idx;
4423 pr_err("%s: cpp device invalid\n", __func__);
4427 msm_cpp_reg_idx = msm_cpp_get_regulator_index(cpp_dev, "vdd");
4428 if (msm_cpp_reg_idx < 0) {
4429 pr_err(" Fail to regulator index\n");
4432 rc = msm_camera_regulator_set_mode(cpp_dev->cpp_vdd +
4433 msm_cpp_reg_idx, 1, status);
4435 pr_err("update cpp gdscr status failed\n");
/*
 * msm_cpp_set_vbif_reg_values - program VBIF QoS registers from the DT
 * property "qcom,vbif-qos-setting", which is a flat list of
 * (offset, value) u32 pairs (hence the odd-length rejection below).
 * Silently does nothing if the property or vbif_base is absent.
 */
4440 static void msm_cpp_set_vbif_reg_values(struct cpp_device *cpp_dev)
4443 const u32 *vbif_qos_arr = NULL;
4444 int vbif_qos_len = 0;
4445 struct platform_device *pdev;
4447 pr_debug("%s\n", __func__);
4448 if (cpp_dev != NULL) {
4449 pdev = cpp_dev->pdev;
4450 vbif_qos_arr = of_get_property(pdev->dev.of_node,
4451 "qcom,vbif-qos-setting",
/* Property must exist and hold an even number of u32 cells. */
4453 if (!vbif_qos_arr || (vbif_qos_len & 1)) {
4454 pr_debug("%s: vbif qos setting not found\n",
4458 vbif_qos_len /= sizeof(u32);
4459 pr_debug("%s: vbif_qos_len %d\n", __func__, vbif_qos_len);
4460 if (cpp_dev->vbif_base) {
/* Each pair: register offset, then value to write. */
4461 for (i = 0; i < vbif_qos_len; i = i+2) {
4462 reg = be32_to_cpu(vbif_qos_arr[i]);
4463 val = be32_to_cpu(vbif_qos_arr[i+1]);
4464 pr_debug("%s: DT: offset %x, val %x\n",
4465 __func__, reg, val);
4466 pr_debug("%s: before write to register 0x%x\n",
4467 __func__, msm_camera_io_r(
4468 cpp_dev->vbif_base + reg))
4469 msm_camera_io_w(val, cpp_dev->vbif_base + reg);
4470 pr_debug("%s: after write to register 0x%x\n",
4471 __func__, msm_camera_io_r(
4472 cpp_dev->vbif_base + reg));
/*
 * msm_cpp_buffer_private_ops - dispatch private buffer-manager ioctls.
 * @buff_mgr_ops: buffer manager command; @id: private sub-command id;
 * @arg: msm_buf_mngr_info for the request.
 *
 * For GET_BUF_BY_IDX, wraps the request in a private-ioctl argument and,
 * if the indexed lookup fails, falls back to the plain
 * VIDIOC_MSM_BUF_MNGR_GET_BUF path. Returns the ops callback's rc.
 */
4478 static int msm_cpp_buffer_private_ops(struct cpp_device *cpp_dev,
4479 uint32_t buff_mgr_ops, uint32_t id, void *arg) {
4484 case MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX: {
4485 struct msm_camera_private_ioctl_arg ioctl_arg;
4486 struct msm_buf_mngr_info *buff_mgr_info =
4487 (struct msm_buf_mngr_info *)arg;
4489 ioctl_arg.id = MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX;
4490 ioctl_arg.size = sizeof(struct msm_buf_mngr_info);
4491 ioctl_arg.result = 0;
4492 ioctl_arg.reserved = 0x0;
4493 ioctl_arg.ioctl_ptr = 0x0;
4494 MSM_CAM_GET_IOCTL_ARG_PTR(&ioctl_arg.ioctl_ptr, &buff_mgr_info,
4496 rc = cpp_dev->buf_mgr_ops.msm_cam_buf_mgr_ops(buff_mgr_ops,
4498 /* Use VIDIOC_MSM_BUF_MNGR_GET_BUF if getbuf with indx fails */
4500 pr_err_ratelimited("get_buf_by_idx for %d err %d,use get_buf\n",
4501 buff_mgr_info->index, rc);
4502 rc = cpp_dev->buf_mgr_ops.msm_cam_buf_mgr_ops(
4503 VIDIOC_MSM_BUF_MNGR_GET_BUF, buff_mgr_info);
4508 pr_err("unsupported buffer manager ioctl\n");
/*
 * cpp_probe - platform driver probe for the CPP block.
 *
 * Allocates the cpp_device, registers the v4l2 subdev, maps the cpp /
 * cpp_vbif / cpp_hw register regions, acquires IRQ, clocks, resets and
 * regulators, registers bandwidth voting, does a one-shot hardware
 * bring-up/release to verify the block, and installs the timeout timer,
 * tasklet and workqueue. All failures unwind through the labelled
 * cleanup chain at the bottom. Returns 0 on success, negative errno on
 * failure.
 */
4515 static int cpp_probe(struct platform_device *pdev)
4517 struct cpp_device *cpp_dev;
4523 cpp_dev = kzalloc(sizeof(struct cpp_device), GFP_KERNEL);
4525 pr_err("no enough memory\n");
/* --- v4l2 subdev identity and driver-private wiring --- */
4529 v4l2_subdev_init(&cpp_dev->msm_sd.sd, &msm_cpp_subdev_ops);
4530 cpp_dev->msm_sd.sd.internal_ops = &msm_cpp_internal_ops;
4531 snprintf(cpp_dev->msm_sd.sd.name, ARRAY_SIZE(cpp_dev->msm_sd.sd.name),
4533 cpp_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
4534 cpp_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
4535 v4l2_set_subdevdata(&cpp_dev->msm_sd.sd, cpp_dev);
4536 platform_set_drvdata(pdev, &cpp_dev->msm_sd.sd);
4537 mutex_init(&cpp_dev->mutex);
4538 spin_lock_init(&cpp_dev->tasklet_lock);
4539 spin_lock_init(&cpp_timer.data.processed_frame_lock);
4541 cpp_dev->pdev = pdev;
4542 memset(&cpp_vbif, 0, sizeof(struct msm_cpp_vbif_data));
4543 cpp_dev->vbif_data = &cpp_vbif;
/* --- map register regions (cpp, cpp_vbif, cpp_hw) --- */
4546 msm_camera_get_reg_base(pdev, "cpp", true);
4547 if (!cpp_dev->base) {
4549 pr_err("failed to get cpp_base\n");
4550 goto cpp_base_failed;
4553 cpp_dev->vbif_base =
4554 msm_camera_get_reg_base(pdev, "cpp_vbif", false);
4555 if (!cpp_dev->vbif_base) {
4557 pr_err("failed to get vbif_base\n");
4558 goto vbif_base_failed;
4561 cpp_dev->cpp_hw_base =
4562 msm_camera_get_reg_base(pdev, "cpp_hw", true);
4563 if (!cpp_dev->cpp_hw_base) {
4565 pr_err("failed to get cpp_hw_base\n");
4566 goto cpp_hw_base_failed;
4569 cpp_dev->irq = msm_camera_get_irq(pdev, "cpp");
4570 if (!cpp_dev->irq) {
4571 pr_err("%s: no irq resource?\n", __func__);
/* --- clocks --- */
4576 rc = msm_camera_get_clk_info(pdev, &cpp_dev->clk_info,
4577 &cpp_dev->cpp_clk, &cpp_dev->num_clks);
4579 pr_err("%s: failed to get the clocks\n", __func__);
4583 /* set memcore and mem periphery logic flags to 0 */
4584 for (i = 0; i < cpp_dev->num_clks; i++) {
4585 if ((strcmp(cpp_dev->clk_info[i].clk_name,
4586 "cpp_core_clk") == 0) ||
4587 (strcmp(cpp_dev->clk_info[i].clk_name,
4588 "camss_cpp_axi_clk") == 0) ||
4589 (strcmp(cpp_dev->clk_info[i].clk_name,
4590 "micro_iface_clk") == 0)) {
4591 msm_camera_set_clk_flags(cpp_dev->cpp_clk[i],
4592 CLKFLAG_NORETAIN_MEM);
4593 msm_camera_set_clk_flags(cpp_dev->cpp_clk[i],
4594 CLKFLAG_NORETAIN_PERIPH);
/* Optional CX iPeak mitigation client. */
4598 if (of_find_property(pdev->dev.of_node, "qcom,cpp-cx-ipeak", NULL)) {
4599 cpp_dev->cpp_cx_ipeak = cx_ipeak_register(
4600 pdev->dev.of_node, "qcom,cpp-cx-ipeak");
4601 if (cpp_dev->cpp_cx_ipeak)
4602 CPP_DBG("Cx ipeak Registration Successful ");
4604 pr_err("Cx ipeak Registration Unsuccessful");
4607 rc = msm_camera_get_reset_info(pdev,
4608 &cpp_dev->micro_iface_reset);
4610 cpp_dev->micro_iface_reset = NULL;
4611 pr_err("%s: failed to get micro_iface_reset\n",
/* --- regulators and DT payload parameters --- */
4616 rc = msm_camera_get_regulator_info(pdev, &cpp_dev->cpp_vdd,
4619 pr_err("%s: failed to get the regulators\n", __func__);
4623 msm_cpp_fetch_dt_params(cpp_dev);
4625 rc = msm_cpp_read_payload_params_from_dt(cpp_dev);
4627 goto cpp_probe_init_error;
/* Bandwidth registration: own client if bus master, else via ISP. */
4629 if (cpp_dev->bus_master_flag)
4630 rc = msm_cpp_init_bandwidth_mgr(cpp_dev);
4632 rc = msm_isp_init_bandwidth_mgr(NULL, ISP_CPP);
4634 pr_err("%s: Bandwidth registration Failed!\n", __func__);
4635 goto cpp_probe_init_error;
/* --- one-shot hardware bring-up and subdev registration --- */
4638 cpp_dev->state = CPP_STATE_BOOT;
4639 rc = cpp_init_hardware(cpp_dev);
4643 media_entity_init(&cpp_dev->msm_sd.sd.entity, 0, NULL, 0);
4644 cpp_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
4645 cpp_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_CPP;
4646 cpp_dev->msm_sd.sd.entity.name = pdev->name;
4647 cpp_dev->msm_sd.close_seq = MSM_SD_CLOSE_3RD_CATEGORY;
4648 msm_sd_register(&cpp_dev->msm_sd);
/* Restore our fops after the generic copy overwrites them. */
4649 msm_cam_copy_v4l2_subdev_fops(&msm_cpp_v4l2_subdev_fops);
4650 msm_cpp_v4l2_subdev_fops.unlocked_ioctl = msm_cpp_subdev_fops_ioctl;
4651 #ifdef CONFIG_COMPAT
4652 msm_cpp_v4l2_subdev_fops.compat_ioctl32 =
4653 msm_cpp_subdev_fops_compat_ioctl;
4656 cpp_dev->msm_sd.sd.devnode->fops = &msm_cpp_v4l2_subdev_fops;
4657 cpp_dev->msm_sd.sd.entity.revision = cpp_dev->msm_sd.sd.devnode->num;
/* Quiesce microcontroller IRQs, then power the block back down. */
4659 msm_camera_io_w(0x0, cpp_dev->base +
4660 MSM_CPP_MICRO_IRQGEN_MASK);
4661 msm_camera_io_w(0xFFFF, cpp_dev->base +
4662 MSM_CPP_MICRO_IRQGEN_CLR);
4663 msm_camera_io_w(0x80000000, cpp_dev->base + 0xF0);
4664 cpp_release_hardware(cpp_dev);
4665 cpp_dev->state = CPP_STATE_OFF;
4666 msm_cpp_enable_debugfs(cpp_dev);
/* --- queues, tasklet, timeout workqueue and timer --- */
4668 msm_queue_init(&cpp_dev->eventData_q, "eventdata");
4669 msm_queue_init(&cpp_dev->processing_q, "frame");
4670 INIT_LIST_HEAD(&cpp_dev->tasklet_q);
4671 tasklet_init(&cpp_dev->cpp_tasklet, msm_cpp_do_tasklet,
4672 (unsigned long)cpp_dev);
4673 cpp_dev->timer_wq = create_workqueue("msm_cpp_workqueue");
4674 cpp_dev->work = kmalloc(sizeof(struct msm_cpp_work_t),
4677 if (!cpp_dev->work) {
4678 pr_err("no enough memory\n");
4683 INIT_WORK((struct work_struct *)cpp_dev->work, msm_cpp_do_timeout_work);
4684 cpp_dev->cpp_open_cnt = 0;
4685 cpp_dev->is_firmware_loaded = 0;
4686 cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
4687 cpp_timer.data.cpp_dev = cpp_dev;
4688 atomic_set(&cpp_timer.used, 0);
4689 /* install timer for cpp timeout */
4690 CPP_DBG("Installing cpp_timer\n");
4691 setup_timer(&cpp_timer.cpp_timer,
4692 cpp_timer_callback, (unsigned long)&cpp_timer);
4693 cpp_dev->fw_name_bin = NULL;
4694 cpp_dev->max_timeout_trial_cnt = MSM_CPP_MAX_TIMEOUT_TRIAL;
4698 CPP_DBG("SUCCESS.");
/* --- error unwind: release in reverse order of acquisition --- */
4704 if (cpp_dev->bus_master_flag)
4705 msm_cpp_deinit_bandwidth_mgr(cpp_dev);
4707 msm_isp_deinit_bandwidth_mgr(ISP_CPP);
4708 cpp_probe_init_error:
4709 media_entity_cleanup(&cpp_dev->msm_sd.sd.entity);
4710 msm_sd_unregister(&cpp_dev->msm_sd);
4712 reset_control_put(cpp_dev->micro_iface_reset);
4714 msm_camera_put_clk_info(pdev, &cpp_dev->clk_info, &cpp_dev->cpp_clk,
4717 msm_camera_put_reg_base(pdev, cpp_dev->cpp_hw_base, "cpp_hw", true);
4719 msm_camera_put_reg_base(pdev, cpp_dev->vbif_base, "cpp_vbif", false);
4721 msm_camera_put_reg_base(pdev, cpp_dev->base, "cpp", true);
4723 msm_camera_put_reg_base(pdev, cpp_dev->camss_cpp_base,
/* Device-tree compatible strings this driver binds to. */
4730 static const struct of_device_id msm_cpp_dt_match[] = {
4731 {.compatible = "qcom,cpp"},
/*
 * cpp_device_remove - platform driver remove: tear down everything
 * cpp_probe() set up (firmware, bandwidth client, subdev, register
 * maps, regulators, clocks, bus client, locks, workqueue). Mirrors the
 * probe acquisitions in reverse.
 */
4735 static int cpp_device_remove(struct platform_device *dev)
4737 struct v4l2_subdev *sd = platform_get_drvdata(dev);
4738 struct cpp_device *cpp_dev;
4741 pr_err("%s: Subdevice is NULL\n", __func__);
4745 cpp_dev = (struct cpp_device *)v4l2_get_subdevdata(sd);
4747 pr_err("%s: cpp device is NULL\n", __func__);
/* release_firmware(NULL) is a safe no-op if never loaded. */
4752 release_firmware(cpp_dev->fw);
4755 if (cpp_dev->bus_master_flag)
4756 msm_cpp_deinit_bandwidth_mgr(cpp_dev);
4758 msm_isp_deinit_bandwidth_mgr(ISP_CPP);
4759 msm_sd_unregister(&cpp_dev->msm_sd);
4760 msm_camera_put_reg_base(dev, cpp_dev->camss_cpp_base,
4762 msm_camera_put_reg_base(dev, cpp_dev->base, "cpp", true);
4763 msm_camera_put_reg_base(dev, cpp_dev->vbif_base, "cpp_vbif", false);
4764 msm_camera_put_reg_base(dev, cpp_dev->cpp_hw_base, "cpp_hw", true);
4765 msm_camera_put_regulators(dev, &cpp_dev->cpp_vdd,
4767 msm_camera_put_clk_info(dev, &cpp_dev->clk_info,
4768 &cpp_dev->cpp_clk, cpp_dev->num_clks);
4769 msm_camera_unregister_bus_client(CAM_BUS_CLIENT_CPP);
4770 mutex_destroy(&cpp_dev->mutex);
4771 kfree(cpp_dev->work);
4773 reset_control_put(cpp_dev->micro_iface_reset);
4775 destroy_workqueue(cpp_dev->timer_wq);
4776 kfree(cpp_dev->cpp_clk);
/* Platform driver registration record; bound via msm_cpp_dt_match. */
4781 static struct platform_driver cpp_driver = {
4783 .remove = cpp_device_remove,
4785 .name = MSM_CPP_DRV_NAME,
4786 .owner = THIS_MODULE,
4787 .of_match_table = msm_cpp_dt_match,
/* Module init: register the CPP platform driver. */
4791 static int __init msm_cpp_init_module(void)
4793 return platform_driver_register(&cpp_driver);
/* Module exit: unregister the CPP platform driver. */
4796 static void __exit msm_cpp_exit_module(void)
4798 platform_driver_unregister(&cpp_driver);
/*
 * Debugfs write handler for the "error" node: induces a fault for
 * testing. Exposed write-only via the simple-attribute below.
 */
4801 static int msm_cpp_debugfs_error_s(void *data, u64 val)
4803 pr_err("setting error inducement");
4808 DEFINE_SIMPLE_ATTRIBUTE(cpp_debugfs_error, NULL,
4809 msm_cpp_debugfs_error_s, "%llu\n");
/*
 * msm_cpp_enable_debugfs - create the /sys/kernel/debug/msm_cpp
 * directory and its "error" inducement file. Failures are non-fatal
 * to the driver; only debugfs functionality is lost.
 */
4811 static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev)
4813 struct dentry *debugfs_base;
4815 debugfs_base = debugfs_create_dir("msm_cpp", NULL);
4819 if (!debugfs_create_file("error", S_IRUGO | S_IWUSR, debugfs_base,
4820 (void *)cpp_dev, &cpp_debugfs_error))
4826 module_init(msm_cpp_init_module);
4827 module_exit(msm_cpp_exit_module);
4828 MODULE_DESCRIPTION("MSM CPP driver");
4829 MODULE_LICENSE("GPL v2");