/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/diagchar.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/reboot.h>
#include <asm/current.h>
#include <soc/qcom/restart.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_DIAG_OVER_USB
#include <linux/usb/usbdiag.h>
#endif
#include "diagchar_hdlc.h"
#include "diagchar.h"
#include "diagfwd.h"
#include "diagfwd_cntl.h"
#include "diag_dci.h"
#include "diag_masks.h"
#include "diagfwd_bridge.h"
#include "diagfwd_peripheral.h"
#include "diag_ipc_logging.h"
44 static struct timer_list dci_drain_timer;
45 static int dci_timer_in_progress;
46 static struct work_struct dci_data_drain_work;
48 struct diag_dci_partial_pkt_t partial_pkt;
50 unsigned int dci_max_reg = 100;
51 unsigned int dci_max_clients = 10;
52 struct mutex dci_log_mask_mutex;
53 struct mutex dci_event_mask_mutex;
56 * DCI_HANDSHAKE_RETRY_TIME: Time to wait (in microseconds) before checking the
57 * connection status again.
59 * DCI_HANDSHAKE_WAIT_TIME: Timeout (in milliseconds) to check for dci
62 #define DCI_HANDSHAKE_RETRY_TIME 500000
63 #define DCI_HANDSHAKE_WAIT_TIME 200
66 unsigned long ws_lock_flags;
68 struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC] = {
71 .send_log_mask = diag_send_dci_log_mask,
72 .send_event_mask = diag_send_dci_event_mask,
73 .peripheral_status = 0,
76 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
78 .ctx = DIAGFWD_MDM_DCI,
79 .send_log_mask = diag_send_dci_log_mask_remote,
80 .send_event_mask = diag_send_dci_event_mask_remote,
81 .peripheral_status = 0,
82 .mempool = POOL_TYPE_MDM_DCI_WRITE,
87 struct dci_channel_status_t dci_channel_status[NUM_DCI_PROC] = {
93 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
95 .id = DIAGFWD_MDM_DCI,
102 /* Number of milliseconds anticipated to process the DCI data */
103 #define DCI_WAKEUP_TIMEOUT 1
105 #define DCI_CAN_ADD_BUF_TO_LIST(buf) \
106 (buf && buf->data && !buf->in_busy && buf->data_len > 0) \
108 #ifdef CONFIG_DEBUG_FS
109 struct diag_dci_data_info *dci_traffic;
110 struct mutex dci_stat_mutex;
111 void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
112 uint8_t peripheral, uint8_t proc)
114 static int curr_dci_data;
115 static unsigned long iteration;
116 struct diag_dci_data_info *temp_data = dci_traffic;
119 mutex_lock(&dci_stat_mutex);
120 if (curr_dci_data == DIAG_DCI_DEBUG_CNT)
122 temp_data += curr_dci_data;
123 temp_data->iteration = iteration + 1;
124 temp_data->data_size = read_bytes;
125 temp_data->peripheral = peripheral;
126 temp_data->ch_type = ch_type;
127 temp_data->proc = proc;
128 diag_get_timestamp(temp_data->time_stamp);
131 mutex_unlock(&dci_stat_mutex);
134 void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
135 uint8_t peripheral, uint8_t proc) { }
138 static int check_peripheral_dci_support(int peripheral_id, int dci_proc_id)
140 int dci_peripheral_list = 0;
142 if (dci_proc_id < 0 || dci_proc_id >= NUM_DCI_PROC) {
143 pr_err("diag:In %s,not a supported DCI proc id\n", __func__);
146 if (peripheral_id < 0 || peripheral_id >= NUM_PERIPHERALS) {
147 pr_err("diag:In %s,not a valid peripheral id\n", __func__);
150 dci_peripheral_list = dci_ops_tbl[dci_proc_id].peripheral_status;
152 if (dci_peripheral_list <= 0 || dci_peripheral_list > DIAG_CON_ALL) {
153 pr_err("diag:In %s,not a valid dci peripheral mask\n",
157 /* Remove APSS bit mask information */
158 dci_peripheral_list = dci_peripheral_list >> 1;
160 if ((1 << peripheral_id) & (dci_peripheral_list))
166 static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
168 unsigned char *temp = mask;
174 /* create hard coded table for log mask with 16 categories */
175 for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
178 *temp = dirty ? 1 : 0;
180 memset(temp, 0, DCI_MAX_ITEMS_PER_LOG_CODE);
181 temp += DCI_MAX_ITEMS_PER_LOG_CODE;
185 static void create_dci_event_mask_tbl(unsigned char *tbl_buf)
188 memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
191 void dci_drain_data(unsigned long data)
193 queue_work(driver->diag_dci_wq, &dci_data_drain_work);
196 static void dci_check_drain_timer(void)
198 if (!dci_timer_in_progress) {
199 dci_timer_in_progress = 1;
200 mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(200));
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Work item: poll the remote DCI channel handshake. Retries up to
 * max_retries times; on timeout the bridge channel is closed.
 */
static void dci_handshake_work_fn(struct work_struct *work)
{
	int err = 0;
	int max_retries = 5;

	struct dci_channel_status_t *status = container_of(work,
						struct dci_channel_status_t,
						handshake_work);

	if (status->open) {
		pr_debug("diag: In %s, remote dci channel is open, index: %d\n",
			 __func__, status->id);
		return;
	}

	if (status->retry_count == max_retries) {
		status->retry_count = 0;
		pr_info("diag: dci channel connection handshake timed out, id: %d\n",
			status->id);
		err = diagfwd_bridge_close(TOKEN_TO_BRIDGE(status->id));
		if (err) {
			pr_err("diag: In %s, unable to close dci channel id: %d, err: %d\n",
			       __func__, status->id, err);
		}
		return;
	}
	status->retry_count++;
	/*
	 * Sleep for sometime to check for the connection status again. The
	 * value should be optimum to include a roundabout time for a small
	 * packet to the remote processor.
	 */
	usleep_range(DCI_HANDSHAKE_RETRY_TIME, DCI_HANDSHAKE_RETRY_TIME + 100);
	mod_timer(&status->wait_time,
		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
}

/* Timer callback: requeue the handshake check for channel @data. */
static void dci_chk_handshake(unsigned long data)
{
	int index = (int)data;

	if (index < 0 || index >= NUM_DCI_PROC)
		return;

	queue_work(driver->diag_dci_wq,
		   &dci_channel_status[index].handshake_work);
}
#endif
254 static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
256 if (!buffer || buffer->data)
260 case DCI_BUF_PRIMARY:
261 buffer->capacity = IN_BUF_SIZE;
262 buffer->data = vzalloc(buffer->capacity);
266 case DCI_BUF_SECONDARY:
268 buffer->capacity = IN_BUF_SIZE;
271 buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
272 buffer->data = vzalloc(buffer->capacity);
277 pr_err("diag: In %s, unknown type %d", __func__, type);
281 buffer->data_len = 0;
283 buffer->buf_type = type;
284 mutex_init(&buffer->data_mutex);
289 static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
294 /* Return 1 if the buffer is not busy and can hold new data */
295 if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
301 static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
302 struct diag_dci_buffer_t *buf)
304 if (!buf || !client || !buf->data)
307 if (buf->in_list || buf->data_len == 0)
310 mutex_lock(&client->write_buf_mutex);
311 list_add_tail(&buf->buf_track, &client->list_write_buf);
313 * In the case of DCI, there can be multiple packets in one read. To
314 * calculate the wakeup source reference count, we must account for each
315 * packet in a single read.
317 diag_ws_on_read(DIAG_WS_DCI, buf->data_len);
318 mutex_lock(&buf->data_mutex);
321 mutex_unlock(&buf->data_mutex);
322 mutex_unlock(&client->write_buf_mutex);
325 static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
326 int data_source, int len)
328 struct diag_dci_buffer_t *buf_primary = NULL;
329 struct diag_dci_buffer_t *buf_temp = NULL;
330 struct diag_dci_buffer_t *curr = NULL;
334 if (len < 0 || len > IN_BUF_SIZE)
337 curr = client->buffers[data_source].buf_curr;
338 buf_primary = client->buffers[data_source].buf_primary;
340 if (curr && diag_dci_check_buffer(curr, len) == 1)
343 dci_add_buffer_to_list(client, curr);
344 client->buffers[data_source].buf_curr = NULL;
346 if (diag_dci_check_buffer(buf_primary, len) == 1) {
347 client->buffers[data_source].buf_curr = buf_primary;
351 buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
355 if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
356 buf_temp->data = diagmem_alloc(driver, IN_BUF_SIZE,
358 if (!buf_temp->data) {
363 client->buffers[data_source].buf_curr = buf_temp;
372 void diag_dci_wakeup_clients()
374 struct list_head *start, *temp;
375 struct diag_dci_client_tbl *entry = NULL;
377 mutex_lock(&driver->dci_mutex);
378 list_for_each_safe(start, temp, &driver->dci_client_list) {
379 entry = list_entry(start, struct diag_dci_client_tbl, track);
382 * Don't wake up the client when there is no pending buffer to
383 * write or when it is writing to user space
385 if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
386 mutex_lock(&entry->write_buf_mutex);
387 entry->in_service = 1;
388 mutex_unlock(&entry->write_buf_mutex);
389 diag_update_sleeping_process(entry->client->tgid,
393 mutex_unlock(&driver->dci_mutex);
396 void dci_data_drain_work_fn(struct work_struct *work)
399 struct list_head *start, *temp;
400 struct diag_dci_client_tbl *entry = NULL;
401 struct diag_dci_buf_peripheral_t *proc_buf = NULL;
402 struct diag_dci_buffer_t *buf_temp = NULL;
404 mutex_lock(&driver->dci_mutex);
405 list_for_each_safe(start, temp, &driver->dci_client_list) {
406 entry = list_entry(start, struct diag_dci_client_tbl, track);
407 for (i = 0; i < entry->num_buffers; i++) {
408 proc_buf = &entry->buffers[i];
410 mutex_lock(&proc_buf->buf_mutex);
411 buf_temp = proc_buf->buf_primary;
412 if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
413 dci_add_buffer_to_list(entry, buf_temp);
415 buf_temp = proc_buf->buf_cmd;
416 if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
417 dci_add_buffer_to_list(entry, buf_temp);
419 buf_temp = proc_buf->buf_curr;
420 if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
421 dci_add_buffer_to_list(entry, buf_temp);
422 proc_buf->buf_curr = NULL;
424 mutex_unlock(&proc_buf->buf_mutex);
426 if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
427 mutex_lock(&entry->write_buf_mutex);
428 entry->in_service = 1;
429 mutex_unlock(&entry->write_buf_mutex);
430 diag_update_sleeping_process(entry->client->tgid,
434 mutex_unlock(&driver->dci_mutex);
435 dci_timer_in_progress = 0;
438 static int diag_process_single_dci_pkt(unsigned char *buf, int len,
439 int data_source, int token)
441 uint8_t cmd_code = 0;
443 if (!buf || len < 0) {
444 pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
449 cmd_code = *(uint8_t *)buf;
453 extract_dci_log(buf, len, data_source, token, NULL);
456 extract_dci_events(buf, len, data_source, token, NULL);
458 case EXT_HDR_CMD_CODE:
459 extract_dci_ext_pkt(buf, len, data_source, token);
461 case DCI_PKT_RSP_CODE:
462 case DCI_DELAYED_RSP_CODE:
463 extract_dci_pkt_rsp(buf, len, data_source, token);
465 case DCI_CONTROL_PKT_CODE:
466 extract_dci_ctrl_pkt(buf, len, token);
469 pr_err("diag: Unable to process single DCI packet, cmd_code: %d, data_source: %d",
470 cmd_code, data_source);
477 /* Process the data read from apps userspace client */
478 void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
483 pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
487 if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
488 && data_type != DCI_PKT_TYPE) {
489 pr_err("diag: In %s, unsupported data_type: 0x%x\n",
490 __func__, (unsigned int)data_type);
494 err = diag_process_single_dci_pkt(buf, recd_bytes, APPS_DATA,
499 /* wake up all sleeping DCI clients which have some data */
500 diag_dci_wakeup_clients();
501 dci_check_drain_timer();
504 void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes)
506 int read_bytes = 0, err = 0;
507 uint16_t dci_pkt_len;
508 struct diag_dci_header_t *header = NULL;
509 int header_len = sizeof(struct diag_dci_header_t);
510 int token = BRIDGE_TO_TOKEN(index);
515 diag_dci_record_traffic(recd_bytes, 0, 0, token);
517 if (!partial_pkt.processing)
520 if (partial_pkt.remaining > recd_bytes) {
521 if ((partial_pkt.read_len + recd_bytes) >
522 (MAX_DCI_PACKET_SZ)) {
523 pr_err("diag: Invalid length %d, %d received in %s\n",
524 partial_pkt.read_len, recd_bytes, __func__);
527 memcpy(partial_pkt.data + partial_pkt.read_len, buf,
529 read_bytes += recd_bytes;
531 partial_pkt.read_len += recd_bytes;
532 partial_pkt.remaining -= recd_bytes;
534 if ((partial_pkt.read_len + partial_pkt.remaining) >
535 (MAX_DCI_PACKET_SZ)) {
536 pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
537 partial_pkt.read_len,
538 partial_pkt.remaining, __func__);
541 memcpy(partial_pkt.data + partial_pkt.read_len, buf,
542 partial_pkt.remaining);
543 read_bytes += partial_pkt.remaining;
545 partial_pkt.read_len += partial_pkt.remaining;
546 partial_pkt.remaining = 0;
549 if (partial_pkt.remaining == 0) {
551 * Retrieve from the DCI control packet after the header = start
552 * (1 byte) + version (1 byte) + length (2 bytes)
554 diag_process_single_dci_pkt(partial_pkt.data + 4,
555 partial_pkt.read_len - header_len,
556 DCI_REMOTE_DATA, token);
557 partial_pkt.read_len = 0;
558 partial_pkt.total_len = 0;
559 partial_pkt.processing = 0;
565 while (read_bytes < recd_bytes) {
566 header = (struct diag_dci_header_t *)buf;
567 dci_pkt_len = header->length;
569 if (header->cmd_code != DCI_CONTROL_PKT_CODE &&
570 driver->num_dci_client == 0) {
571 read_bytes += header_len + dci_pkt_len;
572 buf += header_len + dci_pkt_len;
576 if (dci_pkt_len + header_len > MAX_DCI_PACKET_SZ) {
577 pr_err("diag: Invalid length in the dci packet field %d\n",
582 if ((dci_pkt_len + header_len) > (recd_bytes - read_bytes)) {
583 partial_pkt.read_len = recd_bytes - read_bytes;
584 partial_pkt.total_len = dci_pkt_len + header_len;
585 partial_pkt.remaining = partial_pkt.total_len -
586 partial_pkt.read_len;
587 partial_pkt.processing = 1;
588 memcpy(partial_pkt.data, buf, partial_pkt.read_len);
592 * Retrieve from the DCI control packet after the header = start
593 * (1 byte) + version (1 byte) + length (2 bytes)
595 err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
596 DCI_REMOTE_DATA, DCI_MDM_PROC);
599 read_bytes += header_len + dci_pkt_len;
600 buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
605 /* wake up all sleeping DCI clients which have some data */
606 diag_dci_wakeup_clients();
607 dci_check_drain_timer();
611 /* Process the data read from the peripheral dci channels */
612 void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
615 int read_bytes = 0, err = 0;
616 uint16_t dci_pkt_len;
617 struct diag_dci_pkt_header_t *header = NULL;
618 uint8_t recv_pkt_cmd_code;
624 * Release wakeup source when there are no more clients to
627 if (driver->num_dci_client == 0) {
628 diag_ws_reset(DIAG_WS_DCI);
632 diag_dci_record_traffic(recd_bytes, p_info->type, p_info->peripheral,
634 while (read_bytes < recd_bytes) {
635 header = (struct diag_dci_pkt_header_t *)buf;
636 recv_pkt_cmd_code = header->pkt_code;
637 dci_pkt_len = header->len;
640 * Check if the length of the current packet is lesser than the
641 * remaining bytes in the received buffer. This includes space
642 * for the Start byte (1), Version byte (1), length bytes (2)
645 if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
646 pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
647 __func__, recd_bytes, dci_pkt_len);
652 * Retrieve from the DCI control packet after the header = start
653 * (1 byte) + version (1 byte) + length (2 bytes)
655 err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
656 (int)p_info->peripheral,
662 read_bytes += 5 + dci_pkt_len;
663 buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
668 /* wake up all sleeping DCI clients which have some data */
669 diag_dci_wakeup_clients();
670 dci_check_drain_timer();
674 int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
678 uint8_t equip_id, *log_mask_ptr, byte_mask;
679 int byte_index, offset;
682 pr_err("diag: In %s, invalid client entry\n", __func__);
686 equip_id = LOG_GET_EQUIP_ID(log_code);
687 item_num = LOG_GET_ITEM_NUM(log_code);
688 byte_index = item_num/8 + 2;
689 byte_mask = 0x01 << (item_num % 8);
690 offset = equip_id * 514;
692 if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
693 pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
694 __func__, offset, log_code, byte_index);
698 log_mask_ptr = entry->dci_log_mask;
699 log_mask_ptr = log_mask_ptr + offset + byte_index;
700 return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
704 int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
707 uint8_t *event_mask_ptr, byte_mask;
708 int byte_index, bit_index;
711 pr_err("diag: In %s, invalid client entry\n", __func__);
715 byte_index = event_id/8;
716 bit_index = event_id % 8;
717 byte_mask = 0x1 << bit_index;
719 if (byte_index >= DCI_EVENT_MASK_SIZE) {
720 pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
721 __func__, event_id, byte_index);
725 event_mask_ptr = entry->dci_event_mask;
726 event_mask_ptr = event_mask_ptr + byte_index;
727 return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
730 static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
735 switch (header->cmd_code) {
736 case 0x7d: /* Msg Mask Configuration */
737 case 0x73: /* Log Mask Configuration */
738 case 0x81: /* Event Mask Configuration */
739 case 0x82: /* Event Mask Change */
740 case 0x60: /* Event Mask Toggle */
744 if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
745 switch (header->subsys_cmd_code) {
746 case 0x60: /* Extended Event Mask Config */
747 case 0x61: /* Extended Msg Mask Config */
748 case 0x62: /* Extended Log Mask Config */
749 case 0x20C: /* Set current Preset ID */
750 case 0x20D: /* Get current Preset ID */
751 case 0x218: /* HDLC Disabled Command */
759 static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
762 struct dci_pkt_req_entry_t *entry = NULL;
763 entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
768 entry->client_id = client_id;
770 entry->tag = driver->dci_tag;
771 pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
772 entry->client_id, entry->uid, entry->tag);
773 list_add_tail(&entry->track, &driver->dci_req_list);
778 static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
780 struct list_head *start, *temp;
781 struct dci_pkt_req_entry_t *entry = NULL;
782 list_for_each_safe(start, temp, &driver->dci_req_list) {
783 entry = list_entry(start, struct dci_pkt_req_entry_t, track);
784 if (entry->tag == tag)
790 static int diag_dci_remove_req_entry(unsigned char *buf, int len,
791 struct dci_pkt_req_entry_t *entry)
793 uint16_t rsp_count = 0, delayed_rsp_id = 0;
794 if (!buf || len <= 0 || !entry) {
795 pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
796 __func__, buf, len, entry);
800 /* It is an immediate response, delete it from the table */
802 list_del(&entry->track);
808 /* It is a delayed response. Check if the length is valid */
809 if (len < MIN_DELAYED_RSP_LEN) {
810 pr_err("diag: Invalid delayed rsp packet length %d\n", len);
815 * If the delayed response id field (uint16_t at byte 8) is 0 then
816 * there is only one response and we can remove the request entry.
818 delayed_rsp_id = *(uint16_t *)(buf + 8);
819 if (delayed_rsp_id == 0) {
820 list_del(&entry->track);
827 * Check the response count field (uint16 at byte 10). The request
828 * entry can be deleted it it is the last response in the sequence.
829 * It is the last response in the sequence if the response count
830 * is 1 or if the signed bit gets dropped.
832 rsp_count = *(uint16_t *)(buf + 10);
833 if (rsp_count > 0 && rsp_count < 0x1000) {
834 list_del(&entry->track);
843 static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
845 struct diag_ctrl_dci_status *header = NULL;
846 unsigned char *temp = buf;
847 uint32_t read_len = 0;
849 int peripheral_mask, status;
851 if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
852 pr_err("diag: In %s, invalid buf %pK or length: %d\n",
857 if (!VALID_DCI_TOKEN(token)) {
858 pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
862 header = (struct diag_ctrl_dci_status *)temp;
863 temp += sizeof(struct diag_ctrl_dci_status);
864 read_len += sizeof(struct diag_ctrl_dci_status);
866 for (i = 0; i < header->count; i++) {
867 if (read_len > (len - 2)) {
868 pr_err("diag: In %s, Invalid length len: %d\n",
873 switch (*(uint8_t *)temp) {
874 case PERIPHERAL_MODEM:
875 peripheral_mask = DIAG_CON_MPSS;
877 case PERIPHERAL_LPASS:
878 peripheral_mask = DIAG_CON_LPASS;
880 case PERIPHERAL_WCNSS:
881 peripheral_mask = DIAG_CON_WCNSS;
883 case PERIPHERAL_SENSORS:
884 peripheral_mask = DIAG_CON_SENSORS;
887 pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
888 __func__, *(uint8_t *)temp);
891 temp += sizeof(uint8_t);
892 read_len += sizeof(uint8_t);
894 status = (*(uint8_t *)temp) ? DIAG_STATUS_OPEN :
896 temp += sizeof(uint8_t);
897 read_len += sizeof(uint8_t);
898 diag_dci_notify_client(peripheral_mask, status, token);
902 static void dci_process_ctrl_handshake_pkt(unsigned char *buf, int len,
905 struct diag_ctrl_dci_handshake_pkt *header = NULL;
906 unsigned char *temp = buf;
909 if (!buf || (len < sizeof(struct diag_ctrl_dci_handshake_pkt)))
912 if (!VALID_DCI_TOKEN(token))
915 header = (struct diag_ctrl_dci_handshake_pkt *)temp;
916 if (header->magic == DCI_MAGIC) {
917 dci_channel_status[token].open = 1;
918 err = dci_ops_tbl[token].send_log_mask(token);
920 pr_err("diag: In %s, unable to send log mask to token: %d, err: %d\n",
921 __func__, token, err);
923 err = dci_ops_tbl[token].send_event_mask(token);
925 pr_err("diag: In %s, unable to send event mask to token: %d, err: %d\n",
926 __func__, token, err);
931 void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token)
933 unsigned char *temp = buf;
934 uint32_t ctrl_pkt_id;
936 diag_ws_on_read(DIAG_WS_DCI, len);
938 pr_err("diag: Invalid buffer in %s\n", __func__);
942 if (len < (sizeof(uint8_t) + sizeof(uint32_t))) {
943 pr_err("diag: In %s, invalid length %d\n", __func__, len);
947 /* Skip the Control packet command code */
948 temp += sizeof(uint8_t);
949 len -= sizeof(uint8_t);
950 ctrl_pkt_id = *(uint32_t *)temp;
951 switch (ctrl_pkt_id) {
952 case DIAG_CTRL_MSG_DCI_CONNECTION_STATUS:
953 dci_process_ctrl_status(temp, len, token);
955 case DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT:
956 dci_process_ctrl_handshake_pkt(temp, len, token);
959 pr_debug("diag: In %s, unknown control pkt %d\n",
960 __func__, ctrl_pkt_id);
966 * DCI control packets are not consumed by the clients. Mimic client
967 * consumption by setting and clearing the wakeup source copy_count
970 diag_ws_on_copy_fail(DIAG_WS_DCI);
973 void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
977 struct diag_dci_client_tbl *entry = NULL;
978 void *temp_buf = NULL;
979 uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
980 uint32_t rsp_len = 0;
981 struct diag_dci_buffer_t *rsp_buf = NULL;
982 struct dci_pkt_req_entry_t *req_entry = NULL;
983 unsigned char *temp = buf;
984 int save_req_uid = 0;
985 struct diag_dci_pkt_rsp_header_t pkt_rsp_header;
987 if (!buf || len <= 0) {
988 pr_err("diag: Invalid pointer in %s\n", __func__);
991 dci_cmd_code = *(uint8_t *)(temp);
992 if (dci_cmd_code == DCI_PKT_RSP_CODE) {
993 cmd_code_len = sizeof(uint8_t);
994 } else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
995 cmd_code_len = sizeof(uint32_t);
997 pr_err("diag: In %s, invalid command code %d\n", __func__,
1001 if (len < (cmd_code_len + sizeof(int)))
1003 temp += cmd_code_len;
1005 temp += sizeof(int);
1008 * The size of the response is (total length) - (length of the command
1009 * code, the tag (int)
1011 if (len >= cmd_code_len + sizeof(int)) {
1012 rsp_len = len - (cmd_code_len + sizeof(int));
1013 if ((rsp_len == 0) || (rsp_len > (len - 5))) {
1014 pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d\n",
1015 __func__, len, rsp_len);
1019 pr_err("diag:%s: Invalid length(%d) for calculating rsp_len\n",
1024 mutex_lock(&driver->dci_mutex);
1025 req_entry = diag_dci_get_request_entry(tag);
1027 pr_err_ratelimited("diag: No matching client for DCI data\n");
1028 mutex_unlock(&driver->dci_mutex);
1032 entry = diag_dci_get_client_entry(req_entry->client_id);
1034 pr_err("diag: In %s, couldn't find client entry, id:%d\n",
1035 __func__, req_entry->client_id);
1036 mutex_unlock(&driver->dci_mutex);
1040 save_req_uid = req_entry->uid;
1041 /* Remove the headers and send only the response to this function */
1042 delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
1043 if (delete_flag < 0) {
1044 mutex_unlock(&driver->dci_mutex);
1048 mutex_lock(&entry->buffers[data_source].buf_mutex);
1049 rsp_buf = entry->buffers[data_source].buf_cmd;
1051 mutex_lock(&rsp_buf->data_mutex);
1053 * Check if we can fit the data in the rsp buffer. The total length of
1054 * the rsp is the rsp length (write_len) + DCI_PKT_RSP_TYPE header (int)
1055 * + field for length (int) + delete_flag (uint8_t)
1057 if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
1058 pr_alert("diag: create capacity for pkt rsp\n");
1059 rsp_buf->capacity += 9 + rsp_len;
1060 temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
1063 pr_err("diag: DCI realloc failed\n");
1064 mutex_unlock(&rsp_buf->data_mutex);
1065 mutex_unlock(&entry->buffers[data_source].buf_mutex);
1066 mutex_unlock(&driver->dci_mutex);
1069 rsp_buf->data = temp_buf;
1073 /* Fill in packet response header information */
1074 pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
1075 /* Packet Length = Response Length + Length of uid field (int) */
1076 pkt_rsp_header.length = rsp_len + sizeof(int);
1077 pkt_rsp_header.delete_flag = delete_flag;
1078 pkt_rsp_header.uid = save_req_uid;
1079 memcpy(rsp_buf->data + rsp_buf->data_len, &pkt_rsp_header,
1080 sizeof(struct diag_dci_pkt_rsp_header_t));
1081 rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
1082 memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
1083 rsp_buf->data_len += rsp_len;
1084 rsp_buf->data_source = data_source;
1086 mutex_unlock(&rsp_buf->data_mutex);
1089 * Add directly to the list for writing responses to the
1090 * userspace as these shouldn't be buffered and shouldn't wait
1091 * for log and event buffers to be full
1093 dci_add_buffer_to_list(entry, rsp_buf);
1094 mutex_unlock(&entry->buffers[data_source].buf_mutex);
1095 mutex_unlock(&driver->dci_mutex);
1098 static void copy_ext_hdr(struct diag_dci_buffer_t *data_buffer, void *ext_hdr)
1101 pr_err("diag: In %s, data buffer is NULL", __func__);
1105 *(int *)(data_buffer->data + data_buffer->data_len) =
1107 data_buffer->data_len += sizeof(int);
1108 memcpy(data_buffer->data + data_buffer->data_len, ext_hdr,
1110 data_buffer->data_len += EXT_HDR_LEN;
1113 static void copy_dci_event(unsigned char *buf, int len,
1114 struct diag_dci_client_tbl *client, int data_source,
1117 struct diag_dci_buffer_t *data_buffer = NULL;
1118 struct diag_dci_buf_peripheral_t *proc_buf = NULL;
1119 int err = 0, total_len = 0;
1121 if (!buf || !client) {
1122 pr_err("diag: Invalid pointers in %s", __func__);
1126 total_len = sizeof(int) + len;
1128 total_len += sizeof(int) + EXT_HDR_LEN;
1130 proc_buf = &client->buffers[data_source];
1131 mutex_lock(&proc_buf->buf_mutex);
1132 mutex_lock(&proc_buf->health_mutex);
1133 err = diag_dci_get_buffer(client, data_source, total_len);
1136 proc_buf->health.dropped_events++;
1138 pr_err("diag: In %s, invalid packet\n", __func__);
1139 mutex_unlock(&proc_buf->health_mutex);
1140 mutex_unlock(&proc_buf->buf_mutex);
1144 data_buffer = proc_buf->buf_curr;
1146 proc_buf->health.received_events++;
1147 mutex_unlock(&proc_buf->health_mutex);
1148 mutex_unlock(&proc_buf->buf_mutex);
1150 mutex_lock(&data_buffer->data_mutex);
1152 copy_ext_hdr(data_buffer, ext_hdr);
1154 *(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
1155 data_buffer->data_len += sizeof(int);
1156 memcpy(data_buffer->data + data_buffer->data_len, buf, len);
1157 data_buffer->data_len += len;
1158 data_buffer->data_source = data_source;
1159 mutex_unlock(&data_buffer->data_mutex);
1163 void extract_dci_events(unsigned char *buf, int len, int data_source,
1164 int token, void *ext_hdr)
1166 uint16_t event_id, event_id_packet, length, temp_len;
1167 uint8_t payload_len, payload_len_field;
1168 uint8_t timestamp[8] = {0}, timestamp_len;
1169 unsigned char event_data[MAX_EVENT_SIZE];
1170 unsigned int total_event_len;
1171 struct list_head *start, *temp;
1172 struct diag_dci_client_tbl *entry = NULL;
1175 pr_err("diag: In %s buffer is NULL\n", __func__);
1179 * 1 byte for event code and 2 bytes for the length field.
1180 * The length field indicates the total length removing the cmd_code
1181 * and the lenght field. The event parsing in that case should happen
1185 pr_err("diag: In %s invalid len: %d\n", __func__, len);
1188 length = *(uint16_t *)(buf + 1); /* total length of event series */
1189 if ((length == 0) || (len != (length + 3))) {
1190 pr_err("diag: Incoming dci event length: %d is invalid\n",
1195 * Move directly to the start of the event series.
1196 * The event parsing should happen from start of event
1197 * series till the end.
1200 while (temp_len < length) {
1201 event_id_packet = *(uint16_t *)(buf + temp_len);
1202 event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
1203 if (event_id_packet & 0x8000) {
1204 /* The packet has the two smallest byte of the
1209 /* The packet has the full timestamp. The first event
1210 * will always have full timestamp. Save it in the
1211 * timestamp buffer and use it for subsequent events if
1215 if ((temp_len + timestamp_len + 2) <= len)
1216 memcpy(timestamp, buf + temp_len + 2,
1219 pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
1220 __func__, len, temp_len);
1224 /* 13th and 14th bit represent the payload length */
1225 if (((event_id_packet & 0x6000) >> 13) == 3) {
1226 payload_len_field = 1;
1227 if ((temp_len + timestamp_len + 3) <= len) {
1228 payload_len = *(uint8_t *)
1229 (buf + temp_len + 2 + timestamp_len);
1231 pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
1232 __func__, len, temp_len);
1235 if ((payload_len < (MAX_EVENT_SIZE - 13)) &&
1236 ((temp_len + timestamp_len + payload_len + 3) <= len)) {
1238 * Copy the payload length and the payload
1239 * after skipping temp_len bytes for already
1240 * parsed packet, timestamp_len for timestamp
1241 * buffer, 2 bytes for event_id_packet.
1243 memcpy(event_data + 12, buf + temp_len + 2 +
1245 memcpy(event_data + 13, buf + temp_len + 2 +
1246 timestamp_len + 1, payload_len);
1248 pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
1249 (MAX_EVENT_SIZE - 13), payload_len, temp_len);
1253 payload_len_field = 0;
1254 payload_len = (event_id_packet & 0x6000) >> 13;
1256 * Copy the payload after skipping temp_len bytes
1257 * for already parsed packet, timestamp_len for
1258 * timestamp buffer, 2 bytes for event_id_packet.
1260 if ((payload_len < (MAX_EVENT_SIZE - 12)) &&
1261 ((temp_len + timestamp_len + payload_len + 2) <= len))
1262 memcpy(event_data + 12, buf + temp_len + 2 +
1263 timestamp_len, payload_len);
1265 pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
1266 (MAX_EVENT_SIZE - 12), payload_len, temp_len);
1271 /* Before copying the data to userspace, check if we are still
1272 * within the buffer limit. This is an error case, don't count
1273 * it towards the health statistics.
1275 * Here, the offset of 2 bytes(uint16_t) is for the
1276 * event_id_packet length
1278 temp_len += sizeof(uint16_t) + timestamp_len +
1279 payload_len_field + payload_len;
1280 if (temp_len > len) {
1281 pr_err("diag: Invalid length in %s, len: %d, read: %d",
1282 __func__, len, temp_len);
1286 /* 2 bytes for the event id & timestamp len is hard coded to 8,
1287 as individual events have full timestamp */
1288 *(uint16_t *)(event_data) = 10 +
1289 payload_len_field + payload_len;
1290 *(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
1291 memcpy(event_data + 4, timestamp, 8);
1292 /* 2 bytes for the event length field which is added to
1294 total_event_len = 2 + 10 + payload_len_field + payload_len;
1295 /* parse through event mask tbl of each client and check mask */
1296 mutex_lock(&driver->dci_mutex);
1297 list_for_each_safe(start, temp, &driver->dci_client_list) {
1298 entry = list_entry(start, struct diag_dci_client_tbl,
1300 if (entry->client_info.token != token)
1302 if (diag_dci_query_event_mask(entry, event_id)) {
1303 /* copy to client buffer */
1304 copy_dci_event(event_data, total_event_len,
1305 entry, data_source, ext_hdr);
1308 mutex_unlock(&driver->dci_mutex);
/*
 * copy_dci_log - copy one DCI log packet into a client's per-peripheral
 * DCI buffer, updating the client's health statistics on the way.
 * NOTE(review): this view of the file is sampled; some lines (closing
 * braces / early returns) are not visible here.
 */
1312 static void copy_dci_log(unsigned char *buf, int len,
1313 struct diag_dci_client_tbl *client, int data_source,
1316 uint16_t log_length = 0;
1317 struct diag_dci_buffer_t *data_buffer = NULL;
1318 struct diag_dci_buf_peripheral_t *proc_buf = NULL;
1319 int err = 0, total_len = 0;
1321 if (!buf || !client) {
1322 pr_err("diag: Invalid pointers in %s", __func__);
/* Log length field sits after the 2-byte log code. */
1326 log_length = *(uint16_t *)(buf + 2);
/* Reject lengths that would overflow total_len below. */
1327 if (log_length > USHRT_MAX - 4) {
1328 pr_err("diag: Integer overflow in %s, log_len: %d",
1329 __func__, log_length);
/* total_len = DCI_LOG_TYPE tag (int) + payload; ext header adds more. */
1332 total_len = sizeof(int) + log_length;
1334 total_len += sizeof(int) + EXT_HDR_LEN;
1336 /* Check if we are within the len. The check should include the
1337 * first 4 bytes for the Log code(2) and the length bytes (2)
 */
/* NOTE(review): left side promotes to size_t; assumes len >= 0 — confirm. */
1339 if ((log_length + sizeof(uint16_t) + 2) > len) {
1340 pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
1341 __func__, log_length, len);
1345 proc_buf = &client->buffers[data_source];
/* buf_mutex protects buffer selection; health_mutex the counters. */
1346 mutex_lock(&proc_buf->buf_mutex);
1347 mutex_lock(&proc_buf->health_mutex);
1348 err = diag_dci_get_buffer(client, data_source, total_len);
1351 proc_buf->health.dropped_logs++;
1353 pr_err("diag: In %s, invalid packet\n", __func__);
1354 mutex_unlock(&proc_buf->health_mutex);
1355 mutex_unlock(&proc_buf->buf_mutex);
1359 data_buffer = proc_buf->buf_curr;
1360 proc_buf->health.received_logs++;
1361 mutex_unlock(&proc_buf->health_mutex);
1362 mutex_unlock(&proc_buf->buf_mutex);
1364 mutex_lock(&data_buffer->data_mutex);
1365 if (!data_buffer->data) {
1366 mutex_unlock(&data_buffer->data_mutex);
1370 copy_ext_hdr(data_buffer, ext_hdr);
/* Tag the entry so the reader can demultiplex log vs event data. */
1372 *(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
1373 data_buffer->data_len += sizeof(int);
1374 memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
1376 data_buffer->data_len += log_length;
1377 data_buffer->data_source = data_source;
1378 mutex_unlock(&data_buffer->data_mutex);
/*
 * extract_dci_log - parse the log code out of an incoming DCI log packet
 * and hand the packet to every client (of the given token) whose DCI log
 * mask has that code enabled.
 */
1381 void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
1384 uint16_t log_code, read_bytes = 0;
1385 struct list_head *start, *temp;
1386 struct diag_dci_client_tbl *entry = NULL;
1389 pr_err("diag: In %s buffer is NULL\n", __func__);
1393 * The first eight bytes for the incoming log packet contains
1394 * Command code (2), the length of the packet (2), the length
1395 * of the log (2) and log code (2)
1398 pr_err("diag: In %s invalid len: %d\n", __func__, len);
/* Log code lives at offset 6, after cmd code and the two lengths. */
1402 log_code = *(uint16_t *)(buf + 6);
1403 read_bytes += sizeof(uint16_t) + 6;
1405 /* parse through log mask table of each client and check mask */
1406 mutex_lock(&driver->dci_mutex);
1407 list_for_each_safe(start, temp, &driver->dci_client_list) {
1408 entry = list_entry(start, struct diag_dci_client_tbl, track);
1409 if (entry->client_info.token != token)
1411 if (diag_dci_query_log_mask(entry, log_code)) {
1412 pr_debug("\t log code %x needed by client %d",
1413 log_code, entry->client->tgid);
1414 /* copy to client buffer */
1415 copy_dci_log(buf, len, entry, data_source, ext_hdr);
1418 mutex_unlock(&driver->dci_mutex);
/*
 * extract_dci_ext_pkt - validate the extended DCI header and dispatch the
 * inner packet to the log or event extractor, passing the original buffer
 * as the extended header.
 */
1421 void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
1424 uint8_t version, pkt_cmd_code = 0;
1425 unsigned char *pkt = NULL;
1428 pr_err("diag: In %s buffer is NULL\n", __func__);
1431 if (len < (EXT_HDR_LEN + sizeof(uint8_t))) {
1432 pr_err("diag: In %s invalid len: %d\n", __func__, len);
/*
 * NOTE(review): precedence here reads byte 0 and adds 1, i.e.
 * (*(uint8_t *)buf) + 1 — if the version field is at offset 1 the
 * intent was *(uint8_t *)(buf + 1). Verify against the ext header
 * layout before changing; the subsequent EXT_HDR_VERSION check
 * depends on it.
 */
1436 version = *(uint8_t *)buf + 1;
1437 if (version < EXT_HDR_VERSION) {
1438 pr_err("diag: %s, Extended header with invalid version: %d\n",
/* Inner packet starts right after the extended header. */
1443 pkt = buf + EXT_HDR_LEN;
1444 pkt_cmd_code = *(uint8_t *)pkt;
1447 switch (pkt_cmd_code) {
1449 extract_dci_log(pkt, len, data_source, token, buf);
1451 case EVENT_CMD_CODE:
1452 extract_dci_events(pkt, len, data_source, token, buf);
1455 pr_err("diag: %s unsupported cmd_code: %d, data_source: %d\n",
1456 __func__, pkt_cmd_code, data_source);
/*
 * diag_dci_channel_open_work - workqueue handler run when a DCI channel
 * opens: recompute which cumulative log-mask entries are dirty from the
 * per-client masks, then push updated log and event masks to userspace
 * clients and to the local peripherals.
 */
1461 void diag_dci_channel_open_work(struct work_struct *work)
1464 char dirty_bits[16];
1465 uint8_t *client_log_mask_ptr;
1466 uint8_t *log_mask_ptr;
1468 struct list_head *start, *temp;
1469 struct diag_dci_client_tbl *entry = NULL;
1471 /* Update apps and peripheral(s) with the dci log and event masks */
1472 memset(dirty_bits, 0, 16 * sizeof(uint8_t));
1475 * From each log entry used by each client, determine
1476 * which log entries in the cumulative logs that need
1477 * to be updated on the peripheral.
1479 mutex_lock(&driver->dci_mutex);
1480 list_for_each_safe(start, temp, &driver->dci_client_list) {
1481 entry = list_entry(start, struct diag_dci_client_tbl, track);
/* Only local-processor clients contribute to the local mask. */
1482 if (entry->client_info.token != DCI_LOCAL_PROC)
1484 client_log_mask_ptr = entry->dci_log_mask;
1485 for (j = 0; j < 16; j++) {
/* Byte 1 of each 514-byte equip-id record is its dirty flag. */
1486 if (*(client_log_mask_ptr+1))
1488 client_log_mask_ptr += 514;
1491 mutex_unlock(&driver->dci_mutex);
1493 mutex_lock(&dci_log_mask_mutex);
1494 /* Update the appropriate dirty bits in the cumulative mask */
1495 log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
1496 for (i = 0; i < 16; i++) {
1498 *(log_mask_ptr+1) = dirty_bits[i];
1500 log_mask_ptr += 514;
1502 mutex_unlock(&dci_log_mask_mutex);
1504 /* Send updated mask to userspace clients */
1505 diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
1506 /* Send updated log mask to peripherals */
1507 ret = dci_ops_tbl[DCI_LOCAL_PROC].send_log_mask(DCI_LOCAL_PROC);
1509 /* Send updated event mask to userspace clients */
1510 diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
1511 /* Send updated event mask to peripheral */
1512 ret = dci_ops_tbl[DCI_LOCAL_PROC].send_event_mask(DCI_LOCAL_PROC);
/*
 * diag_dci_notify_client - record a peripheral DCI channel state change in
 * dci_ops_tbl and deliver the client-registered signal (with the mask|data
 * packed in si_int) to every client of @proc that asked to be notified
 * about this peripheral.
 */
1515 void diag_dci_notify_client(int peripheral_mask, int data, int proc)
1518 struct siginfo info;
1519 struct list_head *start, *temp;
1520 struct diag_dci_client_tbl *entry = NULL;
1521 struct pid *pid_struct = NULL;
1522 struct task_struct *dci_task = NULL;
1524 memset(&info, 0, sizeof(struct siginfo));
1525 info.si_code = SI_QUEUE;
1526 info.si_int = (peripheral_mask | data);
1527 if (data == DIAG_STATUS_OPEN)
1528 dci_ops_tbl[proc].peripheral_status |= peripheral_mask;
1530 dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
1532 /* Notify the DCI process that the peripheral DCI Channel is up */
1533 mutex_lock(&driver->dci_mutex);
1534 list_for_each_safe(start, temp, &driver->dci_client_list) {
1535 entry = list_entry(start, struct diag_dci_client_tbl, track);
1536 if (entry->client_info.token != proc)
1538 if (entry->client_info.notification_list & peripheral_mask) {
1539 info.si_signo = entry->client_info.signal_type;
/* Take pid/task references so the client can't vanish under us. */
1540 pid_struct = find_get_pid(entry->tgid);
1542 dci_task = get_pid_task(pid_struct,
1545 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
1546 "diag: dci client with pid = %d Exited..\n",
1548 put_pid(pid_struct);
1549 mutex_unlock(&driver->dci_mutex);
/* Only signal if the stored client still matches the live task. */
1552 if (entry->client &&
1553 entry->tgid == dci_task->tgid) {
1554 DIAG_LOG(DIAG_DEBUG_DCI,
1555 "entry tgid = %d, dci client tgid = %d\n",
1556 entry->tgid, dci_task->tgid);
1557 stat = send_sig_info(
1558 entry->client_info.signal_type,
1561 pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
1564 pr_err("diag: client data is corrupted, signal data: 0x%x, stat: %d\n",
/* Drop the references taken above. */
1567 put_task_struct(dci_task);
1568 put_pid(pid_struct);
1572 mutex_unlock(&driver->dci_mutex);
/*
 * diag_send_dci_pkt - frame @buf as a DCI request (header + payload +
 * CONTROL_CHAR terminator) in driver->apps_dci_buf and route it either to
 * the local Apps handler or to the peripheral that registered the command.
 * Returns DIAG_DCI_NO_ERROR on success or a DIAG_DCI_* error code.
 */
1575 static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
1576 unsigned char *buf, int len, int tag)
1578 int i, status = DIAG_DCI_NO_ERROR;
1579 uint32_t write_len = 0;
1580 struct diag_dci_pkt_header_t header;
1585 if (len < 1 || len > DIAG_MAX_REQ_SIZE) {
1586 pr_err("diag: dci: In %s, invalid length %d, max_length: %d\n",
1587 __func__, len, (int)(DCI_REQ_BUF_SIZE - sizeof(header)));
/* Framed packet (header + payload + terminator) must fit the buffer. */
1591 if ((len + sizeof(header) + sizeof(uint8_t)) > DCI_BUF_SIZE) {
1592 pr_err("diag: dci: In %s, invalid length %d for apps_dci_buf, max_length: %d\n",
1593 __func__, len, DIAG_MAX_REQ_SIZE);
/* dci_mutex serializes use of the shared apps_dci_buf. */
1597 mutex_lock(&driver->dci_mutex);
1598 /* prepare DCI packet */
1599 header.start = CONTROL_CHAR;
/* Payload length + tag (int) + terminator (uint8_t). */
1601 header.len = len + sizeof(int) + sizeof(uint8_t);
1602 header.pkt_code = DCI_PKT_RSP_CODE;
1604 memcpy(driver->apps_dci_buf, &header, sizeof(header));
1605 write_len += sizeof(header);
1606 memcpy(driver->apps_dci_buf + write_len , buf, len);
1608 *(uint8_t *)(driver->apps_dci_buf + write_len) = CONTROL_CHAR;
1609 write_len += sizeof(uint8_t);
1611 /* This command is registered locally on the Apps */
1612 if (entry->proc == APPS_DATA) {
1613 diag_update_pkt_buffer(driver->apps_dci_buf, write_len,
1615 diag_update_sleeping_process(entry->pid, DCI_PKT_TYPE);
1616 mutex_unlock(&driver->dci_mutex);
1617 return DIAG_DCI_NO_ERROR;
/* Otherwise forward to the owning peripheral over its DCI channel. */
1620 for (i = 0; i < NUM_PERIPHERALS; i++)
1621 if (entry->proc == i) {
1627 status = diag_dci_write_proc(entry->proc,
1629 driver->apps_dci_buf,
1632 pr_err("diag: Cannot send packet to peripheral %d",
1634 status = DIAG_DCI_SEND_DATA_FAIL;
1636 mutex_unlock(&driver->dci_mutex);
1640 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * dci_get_buffer_from_bridge - allocate a DIAG_MDM_BUF_SIZE buffer from
 * the token's DCI mempool, retrying (with a short sleep) up to
 * max_retries times before giving up. Returns NULL on failure.
 */
1641 unsigned char *dci_get_buffer_from_bridge(int token)
1643 uint8_t retries = 0, max_retries = 3;
1644 unsigned char *buf = NULL;
1647 buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
1648 dci_ops_tbl[token].mempool);
/* Pool exhausted: back off briefly before the next attempt. */
1650 usleep_range(5000, 5100);
1654 } while (retries < max_retries);
/*
 * diag_dci_write_bridge - forward a DCI buffer to the remote processor
 * identified by @token, translating the DCI token into the bridge index
 * expected by the diagfwd bridge layer.
 */
int diag_dci_write_bridge(int token, unsigned char *buf, int len)
{
	int bridge_idx = TOKEN_TO_BRIDGE(token);

	return diagfwd_bridge_write(bridge_idx, buf, len);
}
/*
 * diag_dci_write_done_bridge - bridge write-completion callback: validate
 * the token derived from the bridge index and return the buffer to that
 * token's DCI mempool.
 */
1664 int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
1666 int token = BRIDGE_TO_TOKEN(index);
1667 if (!VALID_DCI_TOKEN(token)) {
1668 pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
1671 diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
1676 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * diag_send_dci_pkt_remote - frame @data as a DCI request (DCI header +
 * tag + payload + CONTROL_CHAR) in a bridge mempool buffer and send it to
 * the remote processor for @token. The buffer is freed here only on write
 * failure; on success it is released by the bridge write-done path.
 */
1677 static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
1680 unsigned char *buf = NULL;
1681 struct diag_dci_header_t dci_header;
1682 int dci_header_size = sizeof(struct diag_dci_header_t);
1683 int ret = DIAG_DCI_NO_ERROR;
1684 uint32_t write_len = 0;
1689 buf = dci_get_buffer_from_bridge(token);
1691 pr_err("diag: In %s, unable to get dci buffers to write data\n",
1696 dci_header.start = CONTROL_CHAR;
1697 dci_header.version = 1;
1699 * The Length of the DCI packet = length of the command + tag (int) +
1700 * the command code size (uint8_t)
1702 dci_header.length = len + sizeof(int) + sizeof(uint8_t);
1703 dci_header.cmd_code = DCI_PKT_RSP_CODE;
1705 memcpy(buf + write_len, &dci_header, dci_header_size);
1706 write_len += dci_header_size;
/* Tag lets the response be matched back to the originating request. */
1707 *(int *)(buf + write_len) = tag;
1708 write_len += sizeof(int);
1709 memcpy(buf + write_len, data, len);
1711 *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
1712 write_len += sizeof(uint8_t);
1714 ret = diag_dci_write_bridge(token, buf, write_len);
1716 pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
1718 diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
1720 ret = DIAG_DCI_NO_ERROR;
/* Stub used when bridge support is compiled out: always succeeds. */
1726 static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
1729 return DIAG_DCI_NO_ERROR;
1733 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * diag_dci_send_handshake_pkt - send a DCI handshake control packet
 * (version + DCI_MAGIC) to the remote processor for the bridge @index,
 * then arm the per-token wait_time timer to check for a response within
 * DCI_HANDSHAKE_WAIT_TIME ms.
 */
1734 int diag_dci_send_handshake_pkt(int index)
1737 int token = BRIDGE_TO_TOKEN(index);
1739 struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
1740 unsigned char *buf = NULL;
1741 struct diag_dci_header_t dci_header;
1743 if (!VALID_DCI_TOKEN(token)) {
1744 pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
1748 buf = dci_get_buffer_from_bridge(token);
1750 pr_err("diag: In %s, unable to get dci buffers to write data\n",
1755 dci_header.start = CONTROL_CHAR;
1756 dci_header.version = 1;
1757 /* Include the cmd code (uint8_t) in the length */
1758 dci_header.length = sizeof(ctrl_pkt) + sizeof(uint8_t);
1759 dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
1760 memcpy(buf, &dci_header, sizeof(dci_header));
1761 write_len += sizeof(dci_header);
1763 ctrl_pkt.ctrl_pkt_id = DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT;
1765 * The control packet data length accounts for the version (uint32_t)
1766 * of the packet and the magic number (uint32_t).
1768 ctrl_pkt.ctrl_pkt_data_len = 2 * sizeof(uint32_t);
1769 ctrl_pkt.version = 1;
1770 ctrl_pkt.magic = DCI_MAGIC;
1771 memcpy(buf + write_len, &ctrl_pkt, sizeof(ctrl_pkt));
1772 write_len += sizeof(ctrl_pkt);
1774 *(uint8_t *)(buf + write_len) = CONTROL_CHAR;
1775 write_len += sizeof(uint8_t);
1777 err = diag_dci_write_bridge(token, buf, write_len);
1779 pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
1781 diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
/* Start the handshake-response timeout. */
1785 mod_timer(&(dci_channel_status[token].wait_time),
1786 jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
/* Stub used when bridge support is compiled out. */
1791 int diag_dci_send_handshake_pkt(int index)
/*
 * diag_dci_process_apps_pkt - handle DCI requests that the Apps processor
 * answers itself (download, version/build queries, log-on-demand, selected
 * DIAG_SS_DIAG / DIAG_SS_PARAMS subsystem commands). Builds the response
 * payload in driver->apps_dci_buf after a DCI packet header and hands it
 * to diag_process_apps_dci_read_data(). Returns a DIAG_DCI_* code;
 * DIAG_DCI_TABLE_ERR means the command is not an Apps command.
 */
1797 static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
1798 unsigned char *req_buf, int req_len,
1801 uint8_t cmd_code, subsys_id, i, goto_download = 0;
1802 uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
1803 uint16_t ss_cmd_code;
1804 uint32_t write_len = 0;
1805 unsigned char *dest_buf = driver->apps_dci_buf;
1806 unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
1807 struct diag_dci_pkt_header_t dci_header;
1809 if (!pkt_header || !req_buf || req_len <= 0 || tag < 0)
1812 cmd_code = pkt_header->cmd_code;
1813 subsys_id = pkt_header->subsys_id;
1814 ss_cmd_code = pkt_header->subsys_cmd_code;
1816 if (cmd_code == DIAG_CMD_DOWNLOAD) {
/* Echo the command; actual reboot-to-download happens at the end. */
1817 *payload_ptr = DIAG_CMD_DOWNLOAD;
1818 write_len = sizeof(uint8_t);
1821 } else if (cmd_code == DIAG_CMD_VERSION) {
1822 if (chk_polling_response()) {
1823 for (i = 0; i < 55; i++, write_len++, payload_ptr++)
1827 } else if (cmd_code == DIAG_CMD_EXT_BUILD) {
1828 if (chk_polling_response()) {
1829 *payload_ptr = DIAG_CMD_EXT_BUILD;
1830 write_len = sizeof(uint8_t);
1831 payload_ptr += sizeof(uint8_t);
1832 for (i = 0; i < 8; i++, write_len++, payload_ptr++)
1834 *(int *)(payload_ptr) = chk_config_get_id();
1835 write_len += sizeof(int);
1838 } else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
1839 write_len = diag_cmd_log_on_demand(req_buf, req_len,
1841 APPS_BUF_SIZE - header_len);
1843 } else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
/* Not an Apps command; let the caller try the registration table. */
1844 return DIAG_DCI_TABLE_ERR;
1847 if (subsys_id == DIAG_SS_DIAG) {
1848 if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
/* Responses echo the request header, then append the value. */
1849 memcpy(payload_ptr, pkt_header,
1850 sizeof(struct diag_pkt_header_t));
1851 write_len = sizeof(struct diag_pkt_header_t);
1852 *(uint32_t *)(payload_ptr + write_len) =
1854 write_len += sizeof(uint32_t);
1855 } else if (ss_cmd_code == DIAG_DIAG_STM) {
1856 write_len = diag_process_stm_cmd(req_buf, payload_ptr);
1858 } else if (subsys_id == DIAG_SS_PARAMS) {
1859 if (ss_cmd_code == DIAG_DIAG_POLL) {
1860 if (chk_polling_response()) {
1861 memcpy(payload_ptr, pkt_header,
1862 sizeof(struct diag_pkt_header_t));
1863 write_len = sizeof(struct diag_pkt_header_t);
1864 payload_ptr += write_len;
1865 for (i = 0; i < 12; i++, write_len++) {
1870 } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
1871 memcpy(payload_ptr, pkt_header,
1872 sizeof(struct diag_pkt_header_t));
1873 write_len = sizeof(struct diag_pkt_header_t);
1874 *(int *)(payload_ptr + write_len) = wrap_enabled;
1875 write_len += sizeof(int);
1876 } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
1877 wrap_enabled = true;
1878 memcpy(payload_ptr, pkt_header,
1879 sizeof(struct diag_pkt_header_t));
1880 write_len = sizeof(struct diag_pkt_header_t);
1881 *(uint16_t *)(payload_ptr + write_len) = wrap_count;
1882 write_len += sizeof(uint16_t);
1883 } else if (ss_cmd_code == DIAG_EXT_MOBILE_ID) {
1884 write_len = diag_cmd_get_mobile_id(req_buf, req_len,
1886 APPS_BUF_SIZE - header_len);
1891 if (write_len > 0) {
1892 /* Check if we are within the range of the buffer*/
1893 if (write_len + header_len > DIAG_MAX_REQ_SIZE) {
1894 pr_err("diag: In %s, invalid length %d\n", __func__,
1895 write_len + header_len);
1898 dci_header.start = CONTROL_CHAR;
1899 dci_header.version = 1;
1901 * Length of the rsp pkt = actual data len + pkt rsp code
1902 * (uint8_t) + tag (int)
1904 dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
1905 dci_header.pkt_code = DCI_PKT_RSP_CODE;
1906 dci_header.tag = tag;
/* Mark the shared apps buffer busy while the response is queued. */
1907 driver->in_busy_dcipktdata = 1;
1908 memcpy(dest_buf, &dci_header, header_len);
1909 diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
1911 driver->in_busy_dcipktdata = 0;
1913 if (goto_download) {
1915 * Sleep for sometime so that the response reaches the
1916 * client. The value 5000 empirically as an optimum
1917 * time for the response to reach the client.
1919 usleep_range(5000, 5100);
1920 /* call download API */
1921 msm_set_restart_mode(RESTART_DLOAD);
1922 pr_alert("diag: download mode set, Rebooting SoC..\n");
1923 kernel_restart(NULL);
1925 return DIAG_DCI_NO_ERROR;
1928 return DIAG_DCI_TABLE_ERR;
/*
 * diag_process_dci_pkt_rsp - process a DCI packet request from a client:
 * validate lengths and the requesting client, filter disallowed commands,
 * register the transaction (assigning a tag), then route the request to a
 * remote processor, the Apps handler, or the peripheral found in the
 * command registration table. Returns a DIAG_DCI_* status code.
 *
 * Fix: the diag_cmd_search() argument had been corrupted by an HTML
 * entity pass ("&reg" rendered as the ® character); restored to
 * &reg_entry.
 */
1931 static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
1933 int ret = DIAG_DCI_TABLE_ERR;
1935 struct diag_pkt_header_t *header = NULL;
1936 unsigned char *temp = buf;
1937 unsigned char *req_buf = NULL;
1938 uint8_t retry_count = 0, max_retries = 3;
1939 uint32_t read_len = 0, req_len = len;
1940 struct dci_pkt_req_entry_t *req_entry = NULL;
1941 struct diag_dci_client_tbl *dci_entry = NULL;
1942 struct dci_pkt_req_t req_hdr;
1943 struct diag_cmd_reg_t *reg_item;
1944 struct diag_cmd_reg_entry_t reg_entry;
1945 struct diag_cmd_reg_entry_t *temp_entry;
/* Request must hold at least the DCI req header + a diag pkt header. */
1950 if (len <= (sizeof(struct dci_pkt_req_t) +
1951 sizeof(struct diag_pkt_header_t)) ||
1952 len > DCI_REQ_BUF_SIZE) {
1953 pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
1957 req_hdr = *(struct dci_pkt_req_t *)temp;
1958 temp += sizeof(struct dci_pkt_req_t);
1959 read_len += sizeof(struct dci_pkt_req_t);
1960 req_len -= sizeof(struct dci_pkt_req_t);
1961 req_buf = temp; /* Start of the Request */
1962 header = (struct diag_pkt_header_t *)temp;
1963 temp += sizeof(struct diag_pkt_header_t);
1964 read_len += sizeof(struct diag_pkt_header_t);
1965 if (read_len >= DCI_REQ_BUF_SIZE) {
1966 pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
1971 mutex_lock(&driver->dci_mutex);
1972 dci_entry = diag_dci_get_client_entry(req_hdr.client_id);
1974 pr_err("diag: Invalid client %d in %s\n",
1975 req_hdr.client_id, __func__);
1976 mutex_unlock(&driver->dci_mutex);
1977 return DIAG_DCI_NO_REG;
1980 /* Check if the command is allowed on DCI */
1981 if (diag_dci_filter_commands(header)) {
1982 pr_debug("diag: command not supported %d %d %d",
1983 header->cmd_code, header->subsys_id,
1984 header->subsys_cmd_code);
1985 mutex_unlock(&driver->dci_mutex);
1986 return DIAG_DCI_SEND_DATA_FAIL;
1989 common_cmd = diag_check_common_cmd(header);
1990 if (common_cmd < 0) {
1991 pr_debug("diag: error in checking common command, %d\n",
1993 mutex_unlock(&driver->dci_mutex);
1994 return DIAG_DCI_SEND_DATA_FAIL;
1998 * Previous packet is yet to be consumed by the client. Wait
1999 * till the buffer is free.
2001 while (retry_count < max_retries) {
2003 if (driver->in_busy_dcipktdata)
2004 usleep_range(10000, 10100);
2008 /* The buffer is still busy */
2009 if (driver->in_busy_dcipktdata) {
2010 pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
2012 mutex_unlock(&driver->dci_mutex);
2016 /* Register this new DCI packet */
2017 req_entry = diag_register_dci_transaction(req_hdr.uid,
2020 pr_alert("diag: registering new DCI transaction failed\n");
2021 mutex_unlock(&driver->dci_mutex);
2022 return DIAG_DCI_NO_REG;
2024 mutex_unlock(&driver->dci_mutex);
2027 * If the client has registered for remote data, route the packet to the
2030 if (dci_entry->client_info.token > 0) {
2031 ret = diag_send_dci_pkt_remote(req_buf, req_len, req_entry->tag,
2032 dci_entry->client_info.token);
2036 /* Check if it is a dedicated Apps command */
2037 ret = diag_dci_process_apps_pkt(header, req_buf, req_len,
2039 if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
2042 reg_entry.cmd_code = header->cmd_code;
2043 reg_entry.subsys_id = header->subsys_id;
2044 reg_entry.cmd_code_hi = header->subsys_cmd_code;
2045 reg_entry.cmd_code_lo = header->subsys_cmd_code;
2047 mutex_lock(&driver->cmd_reg_mutex);
2048 temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
2050 reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
2052 ret = diag_send_dci_pkt(reg_item, req_buf, req_len,
2055 DIAG_LOG(DIAG_DEBUG_DCI, "Command not found: %02x %02x %02x\n",
2056 reg_entry.cmd_code, reg_entry.subsys_id,
2057 reg_entry.cmd_code_hi);
2059 mutex_unlock(&driver->cmd_reg_mutex);
/*
 * diag_process_dci_transaction - entry point for DCI requests from
 * userspace. Dispatches on the leading int: positive values are packet
 * request/response transactions; DCI_LOG_TYPE / DCI_EVENT_TYPE carry a
 * list of log codes / event ids to set or clear in the requesting
 * client's masks, after which the cumulative masks are updated and
 * pushed to userspace clients and peripherals.
 */
2064 int diag_process_dci_transaction(unsigned char *buf, int len)
2066 unsigned char *temp = buf;
2067 uint16_t log_code, item_num;
2068 int ret = -1, found = 0, client_id = 0, client_token = 0;
2069 int count, set_mask, num_codes, bit_index, event_id, offset = 0;
2070 unsigned int byte_index, read_len = 0;
2071 uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
2072 uint8_t *event_mask_ptr;
2073 struct diag_dci_client_tbl *dci_entry = NULL;
2076 pr_err("diag: Invalid buffer in %s\n", __func__);
2080 /* This is Pkt request/response transaction */
2081 if (*(int *)temp > 0) {
2082 return diag_process_dci_pkt_rsp(buf, len);
2083 } else if (*(int *)temp == DCI_LOG_TYPE) {
2084 /* Minimum length of a log mask config is 12 + 2 bytes for
2085 atleast one log code to be set or reset */
2086 if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
2087 pr_err("diag: dci: Invalid length in %s\n", __func__);
2091 /* Extract each log code and put in client table */
2092 temp += sizeof(int);
2093 read_len += sizeof(int);
2094 client_id = *(int *)temp;
2095 temp += sizeof(int);
2096 read_len += sizeof(int);
2097 set_mask = *(int *)temp;
2098 temp += sizeof(int);
2099 read_len += sizeof(int);
2100 num_codes = *(int *)temp;
2101 temp += sizeof(int);
2102 read_len += sizeof(int);
2104 /* find client table entry */
2105 mutex_lock(&driver->dci_mutex);
2106 dci_entry = diag_dci_get_client_entry(client_id);
2108 pr_err("diag: In %s, invalid client\n", __func__);
2109 mutex_unlock(&driver->dci_mutex);
2112 client_token = dci_entry->client_info.token;
/* Each log code is 2 bytes; bound num_codes by remaining payload. */
2114 if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
2115 pr_err("diag: dci: Invalid number of log codes %d\n",
2117 mutex_unlock(&driver->dci_mutex);
2121 head_log_mask_ptr = dci_entry->dci_log_mask;
2122 if (!head_log_mask_ptr) {
2123 pr_err("diag: dci: Invalid Log mask pointer in %s\n",
2125 mutex_unlock(&driver->dci_mutex);
2128 pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
2129 count = 0; /* iterator for extracting log codes */
2131 while (count < num_codes) {
2132 if (read_len >= USER_SPACE_DATA) {
2133 pr_err("diag: dci: Invalid length for log type in %s",
2135 mutex_unlock(&driver->dci_mutex);
2138 log_code = *(uint16_t *)temp;
2139 equip_id = LOG_GET_EQUIP_ID(log_code);
2140 item_num = LOG_GET_ITEM_NUM(log_code);
/* +2 skips the equip-id byte and the dirty byte of each record. */
2141 byte_index = item_num/8 + 2;
2142 if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
2143 pr_err("diag: dci: Log type, invalid byte index\n");
2144 mutex_unlock(&driver->dci_mutex);
2147 byte_mask = 0x01 << (item_num % 8);
2149 * Parse through log mask table and find
2152 log_mask_ptr = head_log_mask_ptr;
/* Scan 514-byte equip-id records for a matching equip id. */
2155 while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
2156 if (*log_mask_ptr == equip_id) {
2158 pr_debug("diag: find equip id = %x at %pK\n",
2159 equip_id, log_mask_ptr);
2162 pr_debug("diag: did not find equip id = %x at %d\n",
2163 equip_id, *log_mask_ptr);
2164 log_mask_ptr += 514;
2169 pr_err("diag: dci equip id not found\n");
2170 mutex_unlock(&driver->dci_mutex);
2173 *(log_mask_ptr+1) = 1; /* set the dirty byte */
2174 log_mask_ptr = log_mask_ptr + byte_index;
2176 *log_mask_ptr |= byte_mask;
2178 *log_mask_ptr &= ~byte_mask;
2179 /* add to cumulative mask */
2180 update_dci_cumulative_log_mask(
2182 byte_mask, client_token);
2186 ret = DIAG_DCI_NO_ERROR;
2188 /* send updated mask to userspace clients */
2189 if (client_token == DCI_LOCAL_PROC)
2190 diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
2191 /* send updated mask to peripherals */
2192 ret = dci_ops_tbl[client_token].send_log_mask(client_token);
2193 mutex_unlock(&driver->dci_mutex);
2194 } else if (*(int *)temp == DCI_EVENT_TYPE) {
2195 /* Minimum length of a event mask config is 12 + 4 bytes for
2196 atleast one event id to be set or reset. */
2197 if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
2198 pr_err("diag: dci: Invalid length in %s\n", __func__);
2202 /* Extract each event id and put in client table */
2203 temp += sizeof(int);
2204 read_len += sizeof(int);
2205 client_id = *(int *)temp;
2206 temp += sizeof(int);
2207 read_len += sizeof(int);
2208 set_mask = *(int *)temp;
2209 temp += sizeof(int);
2210 read_len += sizeof(int);
2211 num_codes = *(int *)temp;
2212 temp += sizeof(int);
2213 read_len += sizeof(int);
2215 /* find client table entry */
2216 mutex_lock(&driver->dci_mutex);
2217 dci_entry = diag_dci_get_client_entry(client_id);
2219 pr_err("diag: In %s, invalid client\n", __func__);
2220 mutex_unlock(&driver->dci_mutex);
2223 client_token = dci_entry->client_info.token;
2225 /* Check for positive number of event ids. Also, the number of
2226 event ids should fit in the buffer along with set_mask and
2227 num_codes which are 4 bytes each */
2228 if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
2229 pr_err("diag: dci: Invalid number of event ids %d\n",
2231 mutex_unlock(&driver->dci_mutex);
2235 event_mask_ptr = dci_entry->dci_event_mask;
2236 if (!event_mask_ptr) {
2237 pr_err("diag: dci: Invalid event mask pointer in %s\n",
2239 mutex_unlock(&driver->dci_mutex);
2242 pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
2243 count = 0; /* iterator for extracting log codes */
2244 while (count < num_codes) {
2245 if (read_len >= USER_SPACE_DATA) {
2246 pr_err("diag: dci: Invalid length for event type in %s",
2248 mutex_unlock(&driver->dci_mutex);
2251 event_id = *(int *)temp;
2252 byte_index = event_id/8;
2253 if (byte_index >= DCI_EVENT_MASK_SIZE) {
2254 pr_err("diag: dci: Event type, invalid byte index\n");
2255 mutex_unlock(&driver->dci_mutex);
2258 bit_index = event_id % 8;
2259 byte_mask = 0x1 << bit_index;
2261 * Parse through event mask table and set
2262 * relevant byte & bit combination
2265 *(event_mask_ptr + byte_index) |= byte_mask;
2267 *(event_mask_ptr + byte_index) &= ~byte_mask;
2268 /* add to cumulative mask */
2269 update_dci_cumulative_event_mask(byte_index, byte_mask,
2271 temp += sizeof(int);
2272 read_len += sizeof(int);
2274 ret = DIAG_DCI_NO_ERROR;
2276 /* send updated mask to userspace clients */
2277 if (dci_entry->client_info.token == DCI_LOCAL_PROC)
2278 diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
2279 /* send updated mask to peripherals */
2280 ret = dci_ops_tbl[client_token].send_event_mask(client_token);
2281 mutex_unlock(&driver->dci_mutex);
2283 pr_alert("diag: Incorrect DCI transaction\n");
/*
 * diag_dci_get_client_entry - look up a DCI client table entry by its
 * client_id. Caller is expected to hold driver->dci_mutex while using
 * the returned entry.
 */
2289 struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id)
2291 struct list_head *start, *temp;
2292 struct diag_dci_client_tbl *entry = NULL;
2293 list_for_each_safe(start, temp, &driver->dci_client_list) {
2294 entry = list_entry(start, struct diag_dci_client_tbl, track);
2295 if (entry->client_info.client_id == client_id)
/*
 * dci_lookup_client_entry_pid - find the DCI client entry whose recorded
 * task matches a live process with thread-group id @tgid. Takes and
 * releases pid/task references during the scan so stale entries for
 * exited processes are skipped rather than dereferenced.
 */
2301 struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
2303 struct list_head *start, *temp;
2304 struct diag_dci_client_tbl *entry = NULL;
2305 struct pid *pid_struct = NULL;
2306 struct task_struct *task_s = NULL;
2308 list_for_each_safe(start, temp, &driver->dci_client_list) {
2309 entry = list_entry(start, struct diag_dci_client_tbl, track);
2310 pid_struct = find_get_pid(entry->tgid);
2312 DIAG_LOG(DIAG_DEBUG_DCI,
2313 "diag: Exited pid (%d) doesn't match dci client of pid (%d)\n",
2317 task_s = get_pid_task(pid_struct, PIDTYPE_PID);
2319 DIAG_LOG(DIAG_DEBUG_DCI,
2320 "diag: valid task doesn't exist for pid = %d\n",
2322 put_pid(pid_struct);
/* Match both the task pointer and its tgid before returning. */
2325 if (task_s == entry->client) {
2326 if (entry->client->tgid == tgid) {
2327 put_task_struct(task_s);
2328 put_pid(pid_struct);
2332 put_task_struct(task_s);
2333 put_pid(pid_struct);
/*
 * update_dci_cumulative_event_mask - recompute one bit-group of the
 * composite event mask for @token: the composite bit stays set if any
 * client of that token still has it set, and is cleared otherwise.
 */
2338 void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token)
2340 uint8_t *event_mask_ptr, *update_ptr = NULL;
2341 struct list_head *start, *temp;
2342 struct diag_dci_client_tbl *entry = NULL;
2343 bool is_set = false;
2345 mutex_lock(&dci_event_mask_mutex);
2346 update_ptr = dci_ops_tbl[token].event_mask_composite;
2348 mutex_unlock(&dci_event_mask_mutex);
2351 update_ptr += offset;
2352 list_for_each_safe(start, temp, &driver->dci_client_list) {
2353 entry = list_entry(start, struct diag_dci_client_tbl, track);
2354 if (entry->client_info.token != token)
2356 event_mask_ptr = entry->dci_event_mask;
2357 event_mask_ptr += offset;
2358 if ((*event_mask_ptr & byte_mask) == byte_mask) {
2360 /* break even if one client has the event mask set */
2364 if (is_set == false)
2365 *update_ptr &= ~byte_mask;
2367 *update_ptr |= byte_mask;
2368 mutex_unlock(&dci_event_mask_mutex);
/*
 * diag_dci_invalidate_cumulative_event_mask - rebuild @token's composite
 * event mask from scratch by clearing it and OR-ing in the event mask of
 * every client registered with that token.
 */
2371 void diag_dci_invalidate_cumulative_event_mask(int token)
2374 struct list_head *start, *temp;
2375 struct diag_dci_client_tbl *entry = NULL;
2376 uint8_t *event_mask_ptr, *update_ptr = NULL;
2378 mutex_lock(&dci_event_mask_mutex);
2379 update_ptr = dci_ops_tbl[token].event_mask_composite;
2381 mutex_unlock(&dci_event_mask_mutex);
/* Reset the composite mask before re-accumulating client masks. */
2385 create_dci_event_mask_tbl(update_ptr);
2386 list_for_each_safe(start, temp, &driver->dci_client_list) {
2387 entry = list_entry(start, struct diag_dci_client_tbl, track);
2388 if (entry->client_info.token != token)
2390 event_mask_ptr = entry->dci_event_mask;
2391 for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
2392 *(update_ptr+i) |= *(event_mask_ptr+i);
2394 mutex_unlock(&dci_event_mask_mutex);
2397 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * diag_send_dci_event_mask_remote - frame the composite DCI event mask
 * for @token as a DCI control packet (DCI header + event-mask control
 * header + mask bytes + CONTROL_CHAR) and send it over the bridge. The
 * buffer is freed here only if the bridge write fails.
 */
2398 int diag_send_dci_event_mask_remote(int token)
2400 unsigned char *buf = NULL;
2401 struct diag_dci_header_t dci_header;
2402 struct diag_ctrl_event_mask event_mask;
2403 int dci_header_size = sizeof(struct diag_dci_header_t);
2404 int event_header_size = sizeof(struct diag_ctrl_event_mask);
2405 int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
2406 unsigned char *event_mask_ptr = NULL;
2407 uint32_t write_len = 0;
2409 mutex_lock(&dci_event_mask_mutex);
2410 event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
2411 if (!event_mask_ptr) {
2412 mutex_unlock(&dci_event_mask_mutex);
2415 buf = dci_get_buffer_from_bridge(token);
2417 pr_err("diag: In %s, unable to get dci buffers to write data\n",
2419 mutex_unlock(&dci_event_mask_mutex);
2423 /* Frame the DCI header */
2424 dci_header.start = CONTROL_CHAR;
2425 dci_header.version = 1;
2426 dci_header.length = event_header_size + DCI_EVENT_MASK_SIZE + 1;
2427 dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
2429 event_mask.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
2430 event_mask.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
2431 event_mask.stream_id = DCI_MASK_STREAM;
2432 event_mask.status = DIAG_CTRL_MASK_VALID;
2433 event_mask.event_config = 0; /* event config */
2434 event_mask.event_mask_size = DCI_EVENT_MASK_SIZE;
/* event_config = 1 if any bit is set anywhere in the mask. */
2435 for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
2436 if (event_mask_ptr[i] != 0) {
2437 event_mask.event_config = 1;
2441 memcpy(buf + write_len, &dci_header, dci_header_size);
2442 write_len += dci_header_size;
2443 memcpy(buf + write_len, &event_mask, event_header_size);
2444 write_len += event_header_size;
2445 memcpy(buf + write_len, event_mask_ptr, DCI_EVENT_MASK_SIZE);
2446 write_len += DCI_EVENT_MASK_SIZE;
2447 *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
2448 write_len += sizeof(uint8_t);
2449 err = diag_dci_write_bridge(token, buf, write_len);
2451 pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
2453 diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
2456 ret = DIAG_DCI_NO_ERROR;
2458 mutex_unlock(&dci_event_mask_mutex);
/*
 * diag_send_dci_event_mask() - send the local (DCI_LOCAL_PROC) composite
 * event mask to every peripheral that supports DCI.
 *
 * Note: @token is accepted for signature parity with the remote variant in
 * dci_ops_tbl, but the mask is always read from DCI_LOCAL_PROC here.
 *
 * Serializes [diag_ctrl_event_mask header][mask bytes] into the shared
 * event_mask.update_buf (guarded by event_mask.lock) and writes it to each
 * peripheral's control channel via diag_dci_write_proc().
 *
 * Returns DIAG_DCI_NO_ERROR, or DIAG_DCI_SEND_DATA_FAIL if any peripheral
 * write failed (it still attempts the remaining peripherals).
 */
2463 int diag_send_dci_event_mask(int token)
2465 void *buf = event_mask.update_buf;
2466 struct diag_ctrl_event_mask header;
2467 int header_size = sizeof(struct diag_ctrl_event_mask);
2468 int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
2469 unsigned char *event_mask_ptr = NULL;
2471 mutex_lock(&dci_event_mask_mutex);
2472 event_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].event_mask_composite;
2473 if (!event_mask_ptr) {
2474 mutex_unlock(&dci_event_mask_mutex);
/* event_mask.lock protects the shared update_buf while we build/send. */
2478 mutex_lock(&event_mask.lock);
2479 /* send event mask update */
2480 header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
2481 header.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
2482 header.stream_id = DCI_MASK_STREAM;
2483 header.status = DIAG_CTRL_MASK_VALID;
2484 header.event_config = 0; /* event config */
2485 header.event_mask_size = DCI_EVENT_MASK_SIZE;
/* Set event_config if any byte of the composite mask is non-zero. */
2486 for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
2487 if (event_mask_ptr[i] != 0) {
2488 header.event_config = 1;
2492 memcpy(buf, &header, header_size);
2493 memcpy(buf+header_size, event_mask_ptr, DCI_EVENT_MASK_SIZE);
2494 for (i = 0; i < NUM_PERIPHERALS; i++) {
2496 * Don't send to peripheral if its regular channel
2497 * is down. It may also mean that the peripheral doesn't
2500 if (check_peripheral_dci_support(i, DCI_LOCAL_PROC)) {
2501 err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
2502 header_size + DCI_EVENT_MASK_SIZE);
2503 if (err != DIAG_DCI_NO_ERROR)
2504 ret = DIAG_DCI_SEND_DATA_FAIL;
2508 mutex_unlock(&event_mask.lock);
2509 mutex_unlock(&dci_event_mask_mutex);
/*
 * update_dci_cumulative_log_mask() - recompute one byte of the composite
 * (cumulative) DCI log mask for processor @token after a client changed
 * its own mask.
 *
 * @offset:     byte offset of the equip-id block inside the composite mask
 * @byte_index: byte index within that block
 * @byte_mask:  bit(s) being updated within that byte
 * @token:      DCI processor the mask belongs to
 *
 * Marks the block dirty (byte at offset+1 — the per-equip-id dirty flag),
 * then ORs/clears @byte_mask in the composite byte depending on whether at
 * least one registered client for @token still has those bits set.
 */
2514 void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
2515 uint8_t byte_mask, int token)
2517 uint8_t *log_mask_ptr, *update_ptr = NULL;
2518 bool is_set = false;
2519 struct list_head *start, *temp;
2520 struct diag_dci_client_tbl *entry = NULL;
2522 mutex_lock(&dci_log_mask_mutex);
2523 update_ptr = dci_ops_tbl[token].log_mask_composite;
2525 mutex_unlock(&dci_log_mask_mutex);
2529 update_ptr += offset;
2530 /* update the dirty bit */
2531 *(update_ptr+1) = 1;
2532 update_ptr = update_ptr + byte_index;
/* Scan every client registered for this token for the same mask bits. */
2533 list_for_each_safe(start, temp, &driver->dci_client_list) {
2534 entry = list_entry(start, struct diag_dci_client_tbl, track);
2535 if (entry->client_info.token != token)
2537 log_mask_ptr = entry->dci_log_mask;
2538 log_mask_ptr = log_mask_ptr + offset + byte_index;
2539 if ((*log_mask_ptr & byte_mask) == byte_mask) {
2541 /* break even if one client has the log mask set */
2546 if (is_set == false)
2547 *update_ptr &= ~byte_mask;
2549 *update_ptr |= byte_mask;
2550 mutex_unlock(&dci_log_mask_mutex);
/*
 * diag_dci_invalidate_cumulative_log_mask() - rebuild the composite DCI log
 * mask for @token from scratch.
 *
 * Resets the composite table to a dirty/clean baseline and then ORs in the
 * log mask of every client registered for @token. Used after a client's
 * mask is destroyed or cleared, when incremental updates are not possible.
 */
2553 void diag_dci_invalidate_cumulative_log_mask(int token)
2556 struct list_head *start, *temp;
2557 struct diag_dci_client_tbl *entry = NULL;
2558 uint8_t *log_mask_ptr, *update_ptr = NULL;
2560 /* Clear the composite mask and redo all the masks */
2561 mutex_lock(&dci_log_mask_mutex);
2562 update_ptr = dci_ops_tbl[token].log_mask_composite;
2564 mutex_unlock(&dci_log_mask_mutex);
/* DCI_LOG_MASK_DIRTY forces a resend of every equip-id block afterwards. */
2568 create_dci_log_mask_tbl(update_ptr, DCI_LOG_MASK_DIRTY);
2569 list_for_each_safe(start, temp, &driver->dci_client_list) {
2570 entry = list_entry(start, struct diag_dci_client_tbl, track);
2571 if (entry->client_info.token != token)
2573 log_mask_ptr = entry->dci_log_mask;
2574 for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
2575 *(update_ptr+i) |= *(log_mask_ptr+i);
2577 mutex_unlock(&dci_log_mask_mutex);
/*
 * dci_fill_log_mask() - serialize one equip-id's log-mask control packet.
 *
 * @dest_ptr: destination buffer (must hold header + DCI_MAX_ITEMS_PER_LOG_CODE)
 * @src_ptr:  start of one equip-id block in a composite log mask; layout is
 *            [equip_id byte][dirty byte][mask bytes] — the mask bytes start
 *            at src_ptr + 2.
 *
 * Returns the number of bytes written (header + mask payload).
 */
2580 static int dci_fill_log_mask(unsigned char *dest_ptr, unsigned char *src_ptr)
2582 struct diag_ctrl_log_mask header;
2583 int header_len = sizeof(struct diag_ctrl_log_mask);
2585 header.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
2586 header.num_items = DCI_MAX_ITEMS_PER_LOG_CODE;
/* NOTE(review): 11 looks like the fixed control-header length — confirm. */
2587 header.data_len = 11 + DCI_MAX_ITEMS_PER_LOG_CODE;
2588 header.stream_id = DCI_MASK_STREAM;
2590 header.equip_id = *src_ptr;
2591 header.log_mask_size = DCI_MAX_ITEMS_PER_LOG_CODE;
2592 memcpy(dest_ptr, &header, header_len);
2593 memcpy(dest_ptr + header_len, src_ptr + 2, DCI_MAX_ITEMS_PER_LOG_CODE);
2595 return header_len + DCI_MAX_ITEMS_PER_LOG_CODE;
2598 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * diag_send_dci_log_mask_remote() - push the composite DCI log mask for a
 * remote processor over the bridge, one equip-id block at a time.
 *
 * Each iteration frames [dci_header][log-mask ctrl packet][CONTROL_CHAR] for
 * one equip id and writes it with diag_dci_write_bridge(). Blocks whose
 * dirty byte (ptr+1) is clear are skipped. The per-equip-id stride of 514
 * matches [equip_id][dirty][DCI_MAX_ITEMS_PER_LOG_CODE mask bytes] —
 * i.e. 2 + 512; TODO(review) confirm DCI_MAX_ITEMS_PER_LOG_CODE == 512.
 *
 * Returns DIAG_DCI_NO_ERROR on success, an error code otherwise.
 */
2599 int diag_send_dci_log_mask_remote(int token)
2602 unsigned char *buf = NULL;
2603 struct diag_dci_header_t dci_header;
2604 int dci_header_size = sizeof(struct diag_dci_header_t);
2605 int log_header_size = sizeof(struct diag_ctrl_log_mask);
2606 uint8_t *log_mask_ptr = NULL;
2607 int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
2609 uint32_t write_len = 0;
2611 mutex_lock(&dci_log_mask_mutex);
2612 log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
2613 if (!log_mask_ptr) {
2614 mutex_unlock(&dci_log_mask_mutex);
2618 /* DCI header is common to all equipment IDs */
2619 dci_header.start = CONTROL_CHAR;
2620 dci_header.version = 1;
2621 dci_header.length = log_header_size + DCI_MAX_ITEMS_PER_LOG_CODE + 1;
2622 dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
2624 for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
/* Skip equip-id blocks that haven't changed since the last send. */
2627 if (!*(log_mask_ptr + 1)) {
2628 log_mask_ptr += 514;
2632 buf = dci_get_buffer_from_bridge(token);
2634 pr_err("diag: In %s, unable to get dci buffers to write data\n",
2636 mutex_unlock(&dci_log_mask_mutex);
2640 memcpy(buf + write_len, &dci_header, dci_header_size);
2641 write_len += dci_header_size;
2642 write_len += dci_fill_log_mask(buf + write_len, log_mask_ptr);
2643 *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
2644 write_len += sizeof(uint8_t);
2645 err = diag_dci_write_bridge(token, buf, write_len);
2647 pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
/* Write failed: return the buffer to this token's mempool. */
2649 diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
2653 *(log_mask_ptr + 1) = 0; /* clear dirty byte */
2654 log_mask_ptr += 514;
2656 mutex_unlock(&dci_log_mask_mutex);
/*
 * diag_send_dci_log_mask() - send the local composite DCI log mask to all
 * DCI-capable peripherals, one equip-id block (of 16) at a time.
 *
 * Note: like diag_send_dci_event_mask(), @token exists for dci_ops_tbl
 * signature parity; the mask is always DCI_LOCAL_PROC's.
 *
 * Blocks whose dirty byte (ptr+1) is clear are skipped (the comment at
 * original line 2680 reads inverted relative to the code — the code skips
 * CLEAN blocks). Stride 514 = [equip_id][dirty][mask bytes] per block.
 *
 * Returns DIAG_DCI_NO_ERROR, or DIAG_DCI_SEND_DATA_FAIL on any write error.
 */
2661 int diag_send_dci_log_mask(int token)
2663 void *buf = log_mask.update_buf;
2665 uint8_t *log_mask_ptr = NULL;
2666 int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
2670 mutex_lock(&dci_log_mask_mutex);
2671 log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
2672 if (!log_mask_ptr) {
2673 mutex_unlock(&dci_log_mask_mutex);
2677 mutex_lock(&log_mask.lock);
2678 for (i = 0; i < 16; i++) {
2680 /* Dirty bit is set don't update the mask for this equip id */
2681 if (!(*(log_mask_ptr + 1))) {
2682 log_mask_ptr += 514;
2685 write_len = dci_fill_log_mask(buf, log_mask_ptr);
2686 for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
2687 if (check_peripheral_dci_support(j, DCI_LOCAL_PROC)) {
2688 err = diag_dci_write_proc(j, DIAG_CNTL_TYPE,
2690 if (err != DIAG_DCI_NO_ERROR) {
2692 ret = DIAG_DCI_SEND_DATA_FAIL;
2697 *(log_mask_ptr+1) = 0; /* clear dirty byte */
2698 log_mask_ptr += 514;
2700 mutex_unlock(&log_mask.lock);
2701 mutex_unlock(&dci_log_mask_mutex);
/*
 * diag_dci_init_local() - initialize the DCI state for the local (APPS)
 * processor: clean composite log/event mask tables and mark the APSS
 * peripheral as connected in the ops table.
 */
2705 static int diag_dci_init_local(void)
2707 struct dci_ops_tbl_t *temp = &dci_ops_tbl[DCI_LOCAL_PROC];
2709 create_dci_log_mask_tbl(temp->log_mask_composite, DCI_LOG_MASK_CLEAN);
2710 create_dci_event_mask_tbl(temp->event_mask_composite);
2711 temp->peripheral_status |= DIAG_CON_APSS;
2716 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * diag_dci_init_handshake_remote() - set up the handshake timer and work
 * item for every remote DCI processor's channel-status tracker.
 */
2717 static void diag_dci_init_handshake_remote(void)
2720 struct dci_channel_status_t *temp = NULL;
2722 for (i = DCI_REMOTE_BASE; i < NUM_DCI_PROC; i++) {
2723 temp = &dci_channel_status[i];
/* Timer fires dci_chk_handshake with the processor index as its data. */
2725 setup_timer(&temp->wait_time, dci_chk_handshake, i);
2726 INIT_WORK(&temp->handshake_work, dci_handshake_work_fn);
/*
 * diag_dci_init_remote() - initialize DCI state for all remote processors
 * (bridge builds only).
 *
 * Sets up the MDM DCI write mempool, clean composite mask tables for each
 * remote processor, the partial-packet reassembly buffer (MAX_DCI_PACKET_SZ,
 * vzalloc'd — freed in diag_dci_exit()/the init error path), and the
 * handshake timers/work.
 *
 * Returns 0-style success or an error on allocation failure — exact return
 * values are on elided lines; TODO(review) confirm.
 */
2730 static int diag_dci_init_remote(void)
2733 struct dci_ops_tbl_t *temp = NULL;
2735 diagmem_init(driver, POOL_TYPE_MDM_DCI_WRITE);
2737 for (i = DCI_REMOTE_BASE; i < DCI_REMOTE_LAST; i++) {
2738 temp = &dci_ops_tbl[i];
2739 create_dci_log_mask_tbl(temp->log_mask_composite,
2740 DCI_LOG_MASK_CLEAN);
2741 create_dci_event_mask_tbl(temp->event_mask_composite);
2744 partial_pkt.data = vzalloc(MAX_DCI_PACKET_SZ);
2745 if (!partial_pkt.data) {
2746 pr_err("diag: Unable to create partial pkt data\n");
2750 partial_pkt.total_len = 0;
2751 partial_pkt.read_len = 0;
2752 partial_pkt.remaining = 0;
2753 partial_pkt.processing = 0;
2755 diag_dci_init_handshake_remote();
/*
 * Stub for builds without CONFIG_DIAGFWD_BRIDGE_CODE; body elided here —
 * presumably a no-op returning success. TODO(review): confirm.
 */
2760 static int diag_dci_init_remote(void)
/*
 * diag_dci_init_ops_tbl() - initialize the DCI ops table: local processor
 * first, then remote processors. Error propagation between the two calls is
 * on elided lines.
 */
2766 static int diag_dci_init_ops_tbl(void)
2770 err = diag_dci_init_local();
2773 err = diag_dci_init_remote();
/*
 * diag_dci_init() - one-time initialization of the diag DCI subsystem.
 *
 * Initializes counters, mutexes and the ws spinlock, the ops table, the
 * apps DCI buffer (vzalloc, DCI_BUF_SIZE), the client/request lists, the
 * single-threaded DCI workqueue, the drain work item and the drain timer.
 *
 * Returns DIAG_DCI_NO_ERROR on success. The tail lines below (from
 * original line 2816) are the error-unwind path: free the apps buffer,
 * destroy the workqueue, free the partial packet buffer and the mutexes,
 * then return DIAG_DCI_NO_REG.
 */
2783 int diag_dci_init(void)
2787 driver->dci_tag = 0;
2788 driver->dci_client_id = 0;
2789 driver->num_dci_client = 0;
2790 mutex_init(&driver->dci_mutex);
2791 mutex_init(&dci_log_mask_mutex);
2792 mutex_init(&dci_event_mask_mutex);
2793 spin_lock_init(&ws_lock);
2795 ret = diag_dci_init_ops_tbl();
2799 if (driver->apps_dci_buf == NULL) {
2800 driver->apps_dci_buf = vzalloc(DCI_BUF_SIZE);
2801 if (driver->apps_dci_buf == NULL)
2804 INIT_LIST_HEAD(&driver->dci_client_list);
2805 INIT_LIST_HEAD(&driver->dci_req_list);
2807 driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
2808 if (!driver->diag_dci_wq)
2811 INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);
2813 setup_timer(&dci_drain_timer, dci_drain_data, 0);
2814 return DIAG_DCI_NO_ERROR;
/* ---- error unwind path (goto targets) ---- */
2816 pr_err("diag: Could not initialize diag DCI buffers");
2817 vfree(driver->apps_dci_buf);
2818 driver->apps_dci_buf = NULL;
2820 if (driver->diag_dci_wq)
2821 destroy_workqueue(driver->diag_dci_wq);
2822 vfree(partial_pkt.data);
2823 partial_pkt.data = NULL;
2824 mutex_destroy(&driver->dci_mutex);
2825 mutex_destroy(&dci_log_mask_mutex);
2826 mutex_destroy(&dci_event_mask_mutex);
2827 return DIAG_DCI_NO_REG;
/*
 * diag_dci_channel_init() - open the DCI data and DCI command forwarding
 * channels for every peripheral.
 */
2830 void diag_dci_channel_init(void)
2834 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
2835 diagfwd_open(peripheral, TYPE_DCI);
2836 diagfwd_open(peripheral, TYPE_DCI_CMD);
/*
 * diag_dci_exit() - tear down the DCI subsystem: free the reassembly and
 * apps buffers (pointers NULLed to guard against reuse), destroy the
 * mutexes and the DCI workqueue. Mirrors diag_dci_init().
 */
2840 void diag_dci_exit(void)
2842 vfree(partial_pkt.data);
2843 partial_pkt.data = NULL;
2844 vfree(driver->apps_dci_buf);
2845 driver->apps_dci_buf = NULL;
2846 mutex_destroy(&driver->dci_mutex);
2847 mutex_destroy(&dci_log_mask_mutex);
2848 mutex_destroy(&dci_event_mask_mutex);
2849 destroy_workqueue(driver->diag_dci_wq);
/*
 * diag_dci_clear_log_mask() - reset one client's DCI log mask to clean,
 * rebuild the cumulative mask for its processor, notify local userspace
 * clients, and resend the mask to the peripherals via the processor's
 * send_log_mask op.
 *
 * Returns DIAG_DCI_TABLE_ERR for an unknown @client_id, otherwise the
 * result of the mask send.
 */
2852 int diag_dci_clear_log_mask(int client_id)
2854 int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
2855 uint8_t *update_ptr;
2856 struct diag_dci_client_tbl *entry = NULL;
2858 entry = diag_dci_get_client_entry(client_id);
2860 pr_err("diag: In %s, invalid client entry\n", __func__);
2861 return DIAG_DCI_TABLE_ERR;
2863 token = entry->client_info.token;
2864 update_ptr = dci_ops_tbl[token].log_mask_composite;
2866 create_dci_log_mask_tbl(entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
2867 diag_dci_invalidate_cumulative_log_mask(token);
2870 * Send updated mask to userspace clients only if the client
2871 * is registered on the local processor
2873 if (token == DCI_LOCAL_PROC)
2874 diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
2875 /* Send updated mask to peripherals */
2876 err = dci_ops_tbl[token].send_log_mask(token);
/*
 * diag_dci_clear_event_mask() - event-mask counterpart of
 * diag_dci_clear_log_mask(): reset the client's event mask, rebuild the
 * cumulative mask, notify local userspace clients, and resend via the
 * processor's send_event_mask op.
 *
 * Returns DIAG_DCI_TABLE_ERR for an unknown @client_id, otherwise the
 * result of the mask send.
 */
2880 int diag_dci_clear_event_mask(int client_id)
2882 int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
2883 uint8_t *update_ptr;
2884 struct diag_dci_client_tbl *entry = NULL;
2886 entry = diag_dci_get_client_entry(client_id);
2888 pr_err("diag: In %s, invalid client entry\n", __func__);
2889 return DIAG_DCI_TABLE_ERR;
2891 token = entry->client_info.token;
2892 update_ptr = dci_ops_tbl[token].event_mask_composite;
2894 create_dci_event_mask_tbl(entry->dci_event_mask);
2895 diag_dci_invalidate_cumulative_event_mask(token);
2898 * Send updated mask to userspace clients only if the client is
2899 * registerted on the local processor
2901 if (token == DCI_LOCAL_PROC)
2902 diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
2903 /* Send updated mask to peripherals */
2904 err = dci_ops_tbl[token].send_event_mask(token);
/*
 * diag_dci_get_cumulative_real_time() - return MODE_REALTIME if any client
 * registered for @token is in real-time mode, else MODE_NONREALTIME.
 */
2908 uint8_t diag_dci_get_cumulative_real_time(int token)
2910 uint8_t real_time = MODE_NONREALTIME;
2911 struct list_head *start, *temp;
2912 struct diag_dci_client_tbl *entry = NULL;
2914 list_for_each_safe(start, temp, &driver->dci_client_list) {
2915 entry = list_entry(start, struct diag_dci_client_tbl, track);
2916 if (entry->real_time == MODE_REALTIME &&
2917 entry->client_info.token == token) {
/*
 * diag_dci_set_real_time() - set a client's real-time mode flag.
 * Rejects a NULL @entry (guard lines elided around original line 2928).
 */
2925 int diag_dci_set_real_time(struct diag_dci_client_tbl *entry, uint8_t real_time)
2928 pr_err("diag: In %s, invalid client entry\n", __func__);
2931 entry->real_time = real_time;
/*
 * diag_dci_register_client() - register a new DCI client.
 *
 * Validates the registration (token, driver state, client-count cap),
 * allocates a client table entry, log/event masks (vzalloc) and per-
 * peripheral buffers (primary + cmd), links the entry into
 * driver->dci_client_list, and votes the DCI processor up if this is the
 * first client.
 *
 * Holds a reference on the calling task (get_task_struct) — released in
 * diag_dci_deinit_client().
 *
 * Returns the new client id (> 0) on success, DIAG_DCI_NO_REG on failure.
 * The tail from original line 3054 is the error-unwind path that frees
 * everything allocated so far.
 */
2935 int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
2938 struct diag_dci_client_tbl *new_entry = NULL;
2939 struct diag_dci_buf_peripheral_t *proc_buf = NULL;
2942 return DIAG_DCI_NO_REG;
2943 if (!VALID_DCI_TOKEN(reg_entry->token)) {
2944 pr_alert("diag: Invalid DCI client token, %d\n",
2946 return DIAG_DCI_NO_REG;
2949 if (driver->dci_state == DIAG_DCI_NO_REG)
2950 return DIAG_DCI_NO_REG;
2952 if (driver->num_dci_client >= MAX_DCI_CLIENTS)
2953 return DIAG_DCI_NO_REG;
2955 new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
2956 if (new_entry == NULL) {
2957 pr_err("diag: unable to alloc memory\n");
2958 return DIAG_DCI_NO_REG;
2961 mutex_lock(&driver->dci_mutex);
/* Pin the registering task so the notification signal target stays valid. */
2963 get_task_struct(current);
2964 new_entry->client = current;
2965 new_entry->tgid = current->tgid;
2966 new_entry->client_info.notification_list =
2967 reg_entry->notification_list;
2968 new_entry->client_info.signal_type =
2969 reg_entry->signal_type;
2970 new_entry->client_info.token = reg_entry->token;
/* Local clients buffer per peripheral; remote clients use one buffer. */
2971 switch (reg_entry->token) {
2972 case DCI_LOCAL_PROC:
2973 new_entry->num_buffers = NUM_DCI_PERIPHERALS;
2976 new_entry->num_buffers = 1;
2980 new_entry->buffers = NULL;
2981 new_entry->real_time = MODE_REALTIME;
2982 new_entry->in_service = 0;
2983 INIT_LIST_HEAD(&new_entry->list_write_buf);
2984 mutex_init(&new_entry->write_buf_mutex);
2985 new_entry->dci_log_mask = vzalloc(DCI_LOG_MASK_SIZE);
2986 if (!new_entry->dci_log_mask) {
2987 pr_err("diag: Unable to create log mask for client, %d",
2988 driver->dci_client_id);
2991 create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
2993 new_entry->dci_event_mask = vzalloc(DCI_EVENT_MASK_SIZE);
2994 if (!new_entry->dci_event_mask) {
2995 pr_err("diag: Unable to create event mask for client, %d",
2996 driver->dci_client_id);
2999 create_dci_event_mask_tbl(new_entry->dci_event_mask);
3001 new_entry->buffers = kzalloc(new_entry->num_buffers *
3002 sizeof(struct diag_dci_buf_peripheral_t),
3004 if (!new_entry->buffers) {
3005 pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
/* Initialize each peripheral's health stats and primary/cmd buffers. */
3010 for (i = 0; i < new_entry->num_buffers; i++) {
3011 proc_buf = &new_entry->buffers[i];
3015 mutex_init(&proc_buf->health_mutex);
3016 mutex_init(&proc_buf->buf_mutex);
3017 proc_buf->health.dropped_events = 0;
3018 proc_buf->health.dropped_logs = 0;
3019 proc_buf->health.received_events = 0;
3020 proc_buf->health.received_logs = 0;
3021 proc_buf->buf_primary = kzalloc(
3022 sizeof(struct diag_dci_buffer_t),
3024 if (!proc_buf->buf_primary)
3026 proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
3028 if (!proc_buf->buf_cmd)
3030 err = diag_dci_init_buffer(proc_buf->buf_primary,
3034 err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
3037 proc_buf->buf_curr = proc_buf->buf_primary;
3040 list_add_tail(&new_entry->track, &driver->dci_client_list);
3041 driver->dci_client_id++;
3042 new_entry->client_info.client_id = driver->dci_client_id;
3043 reg_entry->client_id = driver->dci_client_id;
3044 driver->num_dci_client++;
3045 if (driver->num_dci_client == 1)
3046 diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP, reg_entry->token);
3047 queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
3048 mutex_unlock(&driver->dci_mutex);
3050 return driver->dci_client_id;
/* ---- error unwind: free every buffer/mask allocated so far ---- */
3054 for (i = 0; ((i < new_entry->num_buffers) &&
3055 new_entry->buffers); i++) {
3056 proc_buf = &new_entry->buffers[i];
3058 mutex_destroy(&proc_buf->health_mutex);
3059 if (proc_buf->buf_primary) {
3060 vfree(proc_buf->buf_primary->data);
3061 proc_buf->buf_primary->data = NULL;
3063 &proc_buf->buf_primary->data_mutex);
3065 kfree(proc_buf->buf_primary);
3066 proc_buf->buf_primary = NULL;
3067 if (proc_buf->buf_cmd) {
3068 vfree(proc_buf->buf_cmd->data);
3069 proc_buf->buf_cmd->data = NULL;
3071 &proc_buf->buf_cmd->data_mutex);
3073 kfree(proc_buf->buf_cmd);
3074 proc_buf->buf_cmd = NULL;
3077 vfree(new_entry->dci_event_mask);
3078 new_entry->dci_event_mask = NULL;
3079 vfree(new_entry->dci_log_mask);
3080 new_entry->dci_log_mask = NULL;
3081 kfree(new_entry->buffers);
3082 new_entry->buffers = NULL;
3086 mutex_unlock(&driver->dci_mutex);
3087 return DIAG_DCI_NO_REG;
/*
 * diag_dci_deinit_client() - tear down one DCI client.
 *
 * Unlinks the entry from the client list, drops the task reference taken
 * at registration, frees the client's log/event masks (rebuilding and
 * resending the cumulative masks), removes its pending packet requests,
 * drains its pending-write buffer list, frees the per-peripheral primary/
 * cmd buffers, and re-votes the DCI processor down / real-time mode based
 * on the remaining clients.
 *
 * Caller is expected to hold driver->dci_mutex — TODO(review): confirm at
 * call sites; no lock is taken on the list here.
 *
 * Returns DIAG_DCI_NO_ERROR, DIAG_DCI_NOT_SUPPORTED for a NULL entry.
 */
3090 int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
3092 int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
3093 struct diag_dci_buf_peripheral_t *proc_buf = NULL;
3094 struct diag_dci_buffer_t *buf_entry, *temp;
3095 struct list_head *start, *req_temp;
3096 struct dci_pkt_req_entry_t *req_entry = NULL;
3097 int token = DCI_LOCAL_PROC;
3100 return DIAG_DCI_NOT_SUPPORTED;
3102 token = entry->client_info.token;
3104 * Remove the entry from the list before freeing the buffers
3105 * to ensure that we don't have any invalid access.
3107 if (!list_empty(&entry->track))
3108 list_del(&entry->track);
3109 driver->num_dci_client--;
/* Balances get_task_struct() in diag_dci_register_client(). */
3111 put_task_struct(entry->client);
3112 entry->client = NULL;
3114 * Clear the client's log and event masks, update the cumulative
3115 * masks and send the masks to peripherals
3117 vfree(entry->dci_log_mask);
3118 entry->dci_log_mask = NULL;
3119 diag_dci_invalidate_cumulative_log_mask(token);
3120 if (token == DCI_LOCAL_PROC)
3121 diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
3122 ret = dci_ops_tbl[token].send_log_mask(token);
3123 if (ret != DIAG_DCI_NO_ERROR) {
3126 vfree(entry->dci_event_mask);
3127 entry->dci_event_mask = NULL;
3128 diag_dci_invalidate_cumulative_event_mask(token);
3129 if (token == DCI_LOCAL_PROC)
3130 diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
3131 ret = dci_ops_tbl[token].send_event_mask(token);
3132 if (ret != DIAG_DCI_NO_ERROR) {
/* Drop any outstanding packet requests owned by this client. */
3136 list_for_each_safe(start, req_temp, &driver->dci_req_list) {
3137 req_entry = list_entry(start, struct dci_pkt_req_entry_t,
3139 if (req_entry->client_id == entry->client_info.client_id) {
3140 if (!list_empty(&req_entry->track))
3141 list_del(&req_entry->track);
3147 /* Clean up any buffer that is pending write */
3148 mutex_lock(&entry->write_buf_mutex);
3149 list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
3151 if (!list_empty(&buf_entry->buf_track))
3152 list_del(&buf_entry->buf_track);
3153 if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
/* Secondary buffers come from the DCI mempool, not vzalloc. */
3154 mutex_lock(&buf_entry->data_mutex);
3155 diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
3156 buf_entry->data = NULL;
3157 mutex_unlock(&buf_entry->data_mutex);
3160 } else if (buf_entry->buf_type == DCI_BUF_CMD) {
3161 peripheral = buf_entry->data_source;
3162 if (peripheral == APPS_DATA)
3166 * These are buffers that can't be written to the client which
3167 * means that the copy cannot be completed. Make sure that we
3168 * remove those references in DCI wakeup source.
3170 diag_ws_on_copy_fail(DIAG_WS_DCI);
3172 mutex_unlock(&entry->write_buf_mutex);
3174 for (i = 0; i < entry->num_buffers; i++) {
3175 proc_buf = &entry->buffers[i];
3176 buf_entry = proc_buf->buf_curr;
3177 mutex_lock(&proc_buf->buf_mutex);
3178 /* Clean up secondary buffer from mempool that is active */
3179 if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
3180 mutex_lock(&buf_entry->data_mutex);
3181 diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
3182 buf_entry->data = NULL;
3183 mutex_unlock(&buf_entry->data_mutex);
3184 mutex_destroy(&buf_entry->data_mutex);
3189 mutex_lock(&proc_buf->buf_primary->data_mutex);
3190 vfree(proc_buf->buf_primary->data);
3191 proc_buf->buf_primary->data = NULL;
3192 mutex_unlock(&proc_buf->buf_primary->data_mutex);
3194 mutex_lock(&proc_buf->buf_cmd->data_mutex);
3195 vfree(proc_buf->buf_cmd->data);
3196 proc_buf->buf_cmd->data = NULL;
3197 mutex_unlock(&proc_buf->buf_cmd->data_mutex);
3199 mutex_destroy(&proc_buf->health_mutex);
3200 mutex_destroy(&proc_buf->buf_primary->data_mutex);
3201 mutex_destroy(&proc_buf->buf_cmd->data_mutex);
3203 kfree(proc_buf->buf_primary);
3204 proc_buf->buf_primary = NULL;
3205 kfree(proc_buf->buf_cmd);
3206 proc_buf->buf_cmd = NULL;
3207 mutex_unlock(&proc_buf->buf_mutex);
3209 mutex_destroy(&entry->write_buf_mutex);
3211 kfree(entry->buffers);
3212 entry->buffers = NULL;
/* Last client gone: vote DCI down; otherwise recompute real-time mode. */
3216 if (driver->num_dci_client == 0) {
3217 diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
3219 real_time = diag_dci_get_cumulative_real_time(token);
3220 diag_update_real_time_vote(DIAG_PROC_DCI, real_time, token);
3222 queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
3224 return DIAG_DCI_NO_ERROR;
/*
 * diag_dci_write_proc() - write a DCI buffer to a peripheral channel.
 *
 * @pkt_type selects the destination: DIAG_DATA_TYPE -> TYPE_DCI_CMD,
 * DIAG_CNTL_TYPE -> TYPE_CNTL; anything else is rejected.
 *
 * Validates buf/peripheral/len and requires the modem's feature mask to
 * have been received first (NOTE(review): the check is against
 * PERIPHERAL_MODEM regardless of @peripheral — looks intentional as a
 * "modem is up" gate, but confirm).
 *
 * -ENODEV from diagfwd_write (channel down) is treated as success.
 * Returns DIAG_DCI_NO_ERROR or an error code.
 */
3227 int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
3229 uint8_t dest_channel = TYPE_DATA;
3232 if (!buf || peripheral >= NUM_PERIPHERALS || len < 0 ||
3233 !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
3234 DIAG_LOG(DIAG_DEBUG_DCI,
3235 "buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
3236 buf, peripheral, len,
3237 driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
3241 if (pkt_type == DIAG_DATA_TYPE) {
3242 dest_channel = TYPE_DCI_CMD;
3243 } else if (pkt_type == DIAG_CNTL_TYPE) {
3244 dest_channel = TYPE_CNTL;
3246 pr_err("diag: Invalid DCI pkt type in %s", __func__);
3250 err = diagfwd_write(peripheral, dest_channel, buf, len);
3251 if (err && err != -ENODEV) {
3252 pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
3253 __func__, peripheral, dest_channel, len, err);
3255 err = DIAG_DCI_NO_ERROR;
/*
 * diag_dci_copy_health_stats() - report (and optionally reset) a DCI
 * client's dropped/received log & event counters.
 *
 * @stats_proc->proc selects one peripheral buffer, or ALL_PROC to sum
 * across all of the client's buffers. For remote clients (non-zero token)
 * the proc field is ignored because they have a single buffer — the elided
 * line after original 3284 presumably forces this; confirm.
 *
 * If stats->reset_status is set the counters are zeroed under the
 * corresponding health_mutex after being copied out.
 *
 * Returns DIAG_DCI_NO_ERROR, or DIAG_DCI_NOT_SUPPORTED for an unknown
 * client.
 */
3261 int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc)
3263 struct diag_dci_client_tbl *entry = NULL;
3264 struct diag_dci_health_t *health = NULL;
3265 struct diag_dci_health_stats *stats = NULL;
3271 stats = &stats_proc->health;
3272 proc = stats_proc->proc;
3273 if (proc < ALL_PROC || proc > APPS_DATA)
3276 entry = diag_dci_get_client_entry(stats_proc->client_id);
3278 return DIAG_DCI_NOT_SUPPORTED;
3281 * If the client has registered for remote processor, the
3282 * proc field doesn't have any effect as they have only one buffer.
3284 if (entry->client_info.token)
3287 stats->stats.dropped_logs = 0;
3288 stats->stats.dropped_events = 0;
3289 stats->stats.received_logs = 0;
3290 stats->stats.received_events = 0;
/* Single-peripheral query: copy that buffer's counters directly. */
3292 if (proc != ALL_PROC) {
3293 health = &entry->buffers[proc].health;
3294 stats->stats.dropped_logs = health->dropped_logs;
3295 stats->stats.dropped_events = health->dropped_events;
3296 stats->stats.received_logs = health->received_logs;
3297 stats->stats.received_events = health->received_events;
3298 if (stats->reset_status) {
3299 mutex_lock(&entry->buffers[proc].health_mutex);
3300 health->dropped_logs = 0;
3301 health->dropped_events = 0;
3302 health->received_logs = 0;
3303 health->received_events = 0;
3304 mutex_unlock(&entry->buffers[proc].health_mutex);
3306 return DIAG_DCI_NO_ERROR;
/* ALL_PROC: accumulate across every buffer the client owns. */
3309 for (i = 0; i < entry->num_buffers; i++) {
3310 health = &entry->buffers[i].health;
3311 stats->stats.dropped_logs += health->dropped_logs;
3312 stats->stats.dropped_events += health->dropped_events;
3313 stats->stats.received_logs += health->received_logs;
3314 stats->stats.received_events += health->received_events;
3315 if (stats->reset_status) {
3316 mutex_lock(&entry->buffers[i].health_mutex);
3317 health->dropped_logs = 0;
3318 health->dropped_events = 0;
3319 health->received_logs = 0;
3320 health->received_events = 0;
3321 mutex_unlock(&entry->buffers[i].health_mutex);
3324 return DIAG_DCI_NO_ERROR;
3327 int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list)
3332 if (!VALID_DCI_TOKEN(support_list->proc))
3335 support_list->list = dci_ops_tbl[support_list->proc].peripheral_status;
3336 return DIAG_DCI_NO_ERROR;